Beispiel #1
0
class Deployer(NucypherTokenActor):
    """
    Deployment actor: orchestrates deployment, upgrade and rollback of the
    NuCypher contract suite, deployment of user escrow (allocation) contracts,
    and persistence of deployment transaction receipts.
    """

    # Registry of deployer classes
    deployer_classes = (
        NucypherTokenDeployer,
        StakingEscrowDeployer,
        PolicyManagerDeployer,
        AdjudicatorDeployer,
        UserEscrowProxyDeployer,
    )

    # All contract names known to agent subclasses at class-definition time.
    contract_names = tuple(a.registry_contract_name
                           for a in EthereumContractAgent.__subclasses__())

    __interface_class = BlockchainDeployerInterface

    class UnknownContract(ValueError):
        """Raised when a contract name has no registered deployer class."""
        pass

    def __init__(self,
                 blockchain: BlockchainInterface,
                 deployer_address: str = None,
                 client_password: str = None,
                 bare: bool = True) -> None:
        """
        :param blockchain: Interface to the target chain.
        :param deployer_address: Checksum address that signs deployment transactions.
        :param client_password: Password used to unlock the deployer account.
        :param bare: When True, skip constructing contract agents (the contracts
                     may not be deployed yet).
        """
        self.blockchain = blockchain
        self.__deployer_address = NO_DEPLOYER_ADDRESS
        self.deployer_address = deployer_address
        self.checksum_address = self.deployer_address

        if not bare:
            self.token_agent = NucypherTokenAgent(blockchain=blockchain)
            self.staking_agent = StakingEscrowAgent(blockchain=blockchain)
            self.policy_agent = PolicyAgent(blockchain=blockchain)
            self.adjudicator_agent = AdjudicatorAgent(blockchain=blockchain)
        else:
            # Fix: previously these attributes were left unset for bare
            # deployers, so token_balance's CONTRACT_NOT_DEPLOYED guard raised
            # AttributeError instead of the intended ActorError.
            self.token_agent = CONTRACT_NOT_DEPLOYED
            self.staking_agent = CONTRACT_NOT_DEPLOYED
            self.policy_agent = CONTRACT_NOT_DEPLOYED
            self.adjudicator_agent = CONTRACT_NOT_DEPLOYED

        self.user_escrow_deployers = dict()  # principal address -> UserEscrowDeployer
        self.deployers = {d.contract_name: d for d in self.deployer_classes}

        # Attach and activate (unlock) the transacting power for the deployer account.
        blockchain.transacting_power = TransactingPower(
            blockchain=blockchain,
            account=deployer_address,
            password=client_password)
        blockchain.transacting_power.activate()
        self.log = Logger("Deployment-Actor")

    def __repr__(self):
        r = '{name}({blockchain}, {deployer_address})'.format(
            name=self.__class__.__name__,
            blockchain=self.blockchain,
            deployer_address=self.deployer_address)
        return r

    @property
    def deployer_address(self):
        """Deployer account address, delegated to the blockchain interface."""
        return self.blockchain.deployer_address

    @deployer_address.setter
    def deployer_address(self, value):
        """Used for validated post-init setting of deployer's address"""
        self.blockchain.deployer_address = value

    @property
    def token_balance(self) -> NU:
        """Token balance of the deployer account; requires a deployed token contract."""
        if self.token_agent is CONTRACT_NOT_DEPLOYED:
            message = f"{self.token_agent.contract_name} contract is not deployed, or the registry has missing records."
            raise self.ActorError(message)
        return super().token_balance

    def __get_deployer(self, contract_name: str):
        """Resolve a contract name to its deployer class or raise UnknownContract."""
        try:
            Deployer = self.deployers[contract_name]
        except KeyError:
            raise self.UnknownContract(contract_name)
        return Deployer

    def deploy_contract(
        self,
        contract_name: str,
        gas_limit: int = None,
        plaintext_secret: str = None,
    ) -> Tuple[dict, ContractDeployer]:
        """
        Deploy a single contract by name.

        :param plaintext_secret: Upgrade secret; required for upgradeable contracts.
        :return: (transaction hashes, deployer instance)
        :raises ValueError: if an upgradeable contract is deployed without a secret.
        """
        Deployer = self.__get_deployer(contract_name=contract_name)
        deployer = Deployer(blockchain=self.blockchain,
                            deployer_address=self.deployer_address)
        if Deployer._upgradeable:
            if not plaintext_secret:
                raise ValueError(
                    "Upgrade plaintext_secret must be passed to deploy an upgradeable contract."
                )
            # Only the hash of the upgrade secret goes on-chain.
            secret_hash = keccak(bytes(plaintext_secret, encoding='utf-8'))
            txhashes = deployer.deploy(secret_hash=secret_hash,
                                       gas_limit=gas_limit)
        else:
            txhashes = deployer.deploy(gas_limit=gas_limit)
        return txhashes, deployer

    def upgrade_contract(self, contract_name: str,
                         existing_plaintext_secret: str,
                         new_plaintext_secret: str) -> dict:
        """
        Upgrade a deployed contract, authenticating with the existing upgrade
        secret and registering the hash of a new one.
        """
        Deployer = self.__get_deployer(contract_name=contract_name)
        deployer = Deployer(blockchain=self.blockchain,
                            deployer_address=self.deployer_address)
        new_secret_hash = keccak(bytes(new_plaintext_secret, encoding='utf-8'))
        txhashes = deployer.upgrade(existing_secret_plaintext=bytes(
            existing_plaintext_secret, encoding='utf-8'),
                                    new_secret_hash=new_secret_hash)
        return txhashes

    def rollback_contract(self, contract_name: str,
                          existing_plaintext_secret: str,
                          new_plaintext_secret: str):
        """
        Roll a deployed contract back to its previous target, authenticating
        with the existing upgrade secret and registering a new one.
        """
        Deployer = self.__get_deployer(contract_name=contract_name)
        deployer = Deployer(blockchain=self.blockchain,
                            deployer_address=self.deployer_address)
        new_secret_hash = keccak(bytes(new_plaintext_secret, encoding='utf-8'))
        txhash = deployer.rollback(existing_secret_plaintext=bytes(
            existing_plaintext_secret, encoding='utf-8'),
                                   new_secret_hash=new_secret_hash)
        return txhash

    def deploy_user_escrow(self, allocation_registry: AllocationRegistry):
        """
        Deploy a single user escrow contract and record its deployer by
        principal (contract) address.
        """
        user_escrow_deployer = UserEscrowDeployer(
            blockchain=self.blockchain,
            deployer_address=self.deployer_address,
            allocation_registry=allocation_registry)
        user_escrow_deployer.deploy()
        principal_address = user_escrow_deployer.contract.address
        self.user_escrow_deployers[principal_address] = user_escrow_deployer
        return user_escrow_deployer

    def deploy_network_contracts(
        self,
        staker_secret: str,
        policy_secret: str,
        adjudicator_secret: str,
        user_escrow_proxy_secret: str,
    ) -> Tuple[dict, dict]:
        """
        Musketeers, if you will; Deploy the "big three" contracts to the blockchain.

        :return: ({contract name: tx hashes}, {contract name: deployer})
        """
        token_txs, token_deployer = self.deploy_contract(
            contract_name='NuCypherToken')
        staking_txs, staking_deployer = self.deploy_contract(
            contract_name='StakingEscrow', plaintext_secret=staker_secret)
        policy_txs, policy_deployer = self.deploy_contract(
            contract_name='PolicyManager', plaintext_secret=policy_secret)
        adjudicator_txs, adjudicator_deployer = self.deploy_contract(
            contract_name='Adjudicator', plaintext_secret=adjudicator_secret)
        user_escrow_proxy_txs, user_escrow_proxy_deployer = self.deploy_contract(
            contract_name='UserEscrowProxy',
            plaintext_secret=user_escrow_proxy_secret)

        deployers = (
            token_deployer,
            staking_deployer,
            policy_deployer,
            adjudicator_deployer,
            user_escrow_proxy_deployer,
        )

        txhashes = {
            NucypherTokenDeployer.contract_name: token_txs,
            StakingEscrowDeployer.contract_name: staking_txs,
            PolicyManagerDeployer.contract_name: policy_txs,
            AdjudicatorDeployer.contract_name: adjudicator_txs,
            UserEscrowProxyDeployer.contract_name: user_escrow_proxy_txs,
        }

        deployers = {
            deployer.contract_name: deployer
            for deployer in deployers
        }
        return txhashes, deployers

    def deploy_beneficiary_contracts(
        self,
        allocations: List[Dict[str, Union[str, int]]],
        allocation_outfile: str = None,
        allocation_registry: AllocationRegistry = None,
        crash_on_failure: bool = True,
    ) -> Dict[str, dict]:
        """
        Deploy a user escrow contract per allocation and deliver the tokens.

        Example allocation dataset (one year is 31536000 seconds):

        data = [{'address': '0xdeadbeef', 'amount': 100, 'duration': 31536000},
                {'address': '0xabced120', 'amount': 133432, 'duration': 31536000*2},
                {'address': '0xf7aefec2', 'amount': 999, 'duration': 31536000*3}]

        :param crash_on_failure: When False, failed allocations are logged and
                                 collected instead of aborting the run.
        :return: {beneficiary address: tx hashes}
        """
        if allocation_registry and allocation_outfile:
            raise self.ActorError(
                "Pass either allocation registry or allocation_outfile, not both."
            )
        if allocation_registry is None:
            allocation_registry = AllocationRegistry(
                registry_filepath=allocation_outfile)

        allocation_txhashes, failed = dict(), list()
        for allocation in allocations:
            deployer = self.deploy_user_escrow(
                allocation_registry=allocation_registry)

            try:
                txhashes = deployer.deliver(
                    value=allocation['amount'],
                    duration=allocation['duration'],
                    beneficiary_address=allocation['address'])
            except TransactionFailed:
                if crash_on_failure:
                    raise
                self.log.debug(
                    f"Failed allocation transaction for {allocation['amount']} to {allocation['address']}"
                )
                failed.append(allocation)
                continue

            else:
                allocation_txhashes[allocation['address']] = txhashes

        if failed:
            # TODO: More with these failures: send to isolated logfile, and reattempt
            self.log.critical(
                f"FAILED TOKEN ALLOCATION - {len(failed)} Allocations failed.")

        return allocation_txhashes

    @staticmethod
    def __read_allocation_data(filepath: str) -> list:
        """Read and parse a JSON allocation data file (see deploy_beneficiary_contracts)."""
        # Parse errors (json.JSONDecodeError) propagate to the caller unchanged.
        with open(filepath, 'r') as allocation_file:
            allocation_data = json.loads(allocation_file.read())
        return allocation_data

    def deploy_beneficiaries_from_file(self,
                                       allocation_data_filepath: str,
                                       allocation_outfile: str = None) -> dict:
        """Deploy beneficiary escrows from a JSON allocation data file."""
        allocations = self.__read_allocation_data(
            filepath=allocation_data_filepath)
        txhashes = self.deploy_beneficiary_contracts(
            allocations=allocations, allocation_outfile=allocation_outfile)
        return txhashes

    def save_deployment_receipts(self, transactions: dict) -> str:
        """
        Wait for a receipt for every transaction and write them all to a JSON
        file under DEFAULT_CONFIG_ROOT.

        :param transactions: {contract name: {tx name: tx hash}}
        :return: Path of the written receipt file.
        """
        filename = f'deployment-receipts-{self.deployer_address[:6]}-{maya.now().epoch}.json'
        filepath = os.path.join(DEFAULT_CONFIG_ROOT, filename)
        # TODO: Do not assume default config root
        os.makedirs(DEFAULT_CONFIG_ROOT, exist_ok=True)
        with open(filepath, 'w') as file:
            data = dict()
            # Fix: the inner loop previously rebound the parameter name
            # `transactions`, and a dict comprehension overwrote every tx
            # record with the last receipt fetched for the contract.
            for contract_name, contract_txs in transactions.items():
                contract_records = dict()
                for tx_name, txhash in contract_txs.items():
                    receipt = self.blockchain.client.wait_for_receipt(
                        txhash, timeout=self.blockchain.TIMEOUT)
                    # Stringify receipt values so the payload is JSON-serializable.
                    contract_records[tx_name] = {
                        item: str(result)
                        for item, result in receipt.items()
                    }
                data[contract_name] = contract_records
            file.write(json.dumps(data, indent=4))
        return filepath
Beispiel #2
0
class WorkTracker:
    """
    Tracks staking periods for a worker and periodically confirms on-chain
    activity via a Twisted LoopingCall.
    """

    CLOCK = reactor
    REFRESH_RATE = 60 * 15  # Fifteen minutes

    def __init__(self, worker, refresh_rate: int = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.log = Logger('stake-tracker')

        # The worker whose activity is being tracked, and its staking agent.
        self.worker = worker
        self.staking_agent = self.worker.staking_agent

        # Periodic task driving _do_work on the shared clock.
        self._refresh_rate = refresh_rate if refresh_rate else self.REFRESH_RATE
        self._tracking_task = task.LoopingCall(self._do_work)
        self._tracking_task.clock = self.CLOCK

        # Period bookkeeping; populated when tracking starts.
        self._abort_on_error = True
        self.__uptime_period = NOT_STAKING
        self.__start_time = NOT_STAKING
        self.__current_period = None

    @property
    def current_period(self):
        """Most recently observed staking period."""
        return self.__current_period

    def stop(self) -> None:
        """Halt the periodic tracking task."""
        self._tracking_task.stop()
        self.log.info("STOPPED WORK TRACKING")

    def start(self, act_now: bool = False, force: bool = False) -> None:
        """
        High-level stake tracking initialization, this function aims
        to be safely called at any time - For example, it is okay to call
        this function multiple times within the same period.
        """
        already_running = self._tracking_task.running
        if already_running and not force:
            return

        # Record the start time and period
        self.__start_time = maya.now()
        self.__uptime_period = self.staking_agent.get_current_period()
        self.__current_period = self.__uptime_period

        deferred = self._tracking_task.start(interval=self._refresh_rate)
        deferred.addErrback(self.handle_working_errors)
        self.log.info("STARTED WORK TRACKING")

        if act_now:
            self._do_work()

    def _crash_gracefully(self, failure=None) -> None:
        """
        A facility for crashing more gracefully in the event that
        an exception is unhandled in a different thread.
        """
        self._crashed = failure
        failure.raiseException()

    def handle_working_errors(self, *args, **kwargs) -> None:
        """Errback for the tracking task: crash or warn per _abort_on_error."""
        failure = args[0]
        if not self._abort_on_error:
            self.log.warn(
                f"Unhandled error during work tracking: {failure.getTraceback()}"
            )
            return
        self.log.critical(
            f"Unhandled error during node work tracking. {failure}")
        reactor.callFromThread(self._crash_gracefully, failure=failure)

    def _do_work(self) -> None:
        """Read the on-chain period and confirm activity for the worker."""
        # TODO: Check for stake expiration and exit
        # TODO: Follow-up actions for downtime

        # Update on-chain status
        self.log.info(
            f"Checking for new period. Current period is {self.__current_period}"
        )
        onchain_period = self.staking_agent.get_current_period()  # read from contract
        if onchain_period != self.current_period:
            self.__current_period = onchain_period
            # self.worker.stakes.refresh()  # TODO: Track stakes

        # Measure working interval
        missed = onchain_period - self.worker.last_active_period
        if missed < 0:
            return  # No need to confirm this period.  Save the gas.
        if missed > 0:
            self.log.warn(
                f"MISSED CONFIRMATIONS - {missed} missed staking confirmations detected."
            )

        # Confirm Activity
        self.log.info(f"Confirmed activity for period {self.current_period}")
        transacting_power = self.worker.transacting_power
        with transacting_power:
            self.worker.confirm_activity()  # blockchain write
Beispiel #3
0
class Modem(object):
    '''
    Line-oriented AT-command modem driver.

    Buffers response lines from the serial protocol until a final response
    ("OK", "ERROR", "+CME/+CMS ERROR: n", "COMMAND NOT SUPPORT") arrives,
    then fires the pending Deferred with the accumulated lines.
    '''

    def __init__(self, protocol, event_fct=None):
        '''
        Constructor

        :param protocol: line protocol; must expose addCallback() and write().
        :param event_fct: optional handler for unsolicited modem lines.
        '''
        self.log = Logger()
        self.first = True        # True until the modem has been initialized (ATZ)
        self.event = event_fct
        self.callback = None     # pending Deferred for the in-flight command
        self.wait = False        # True while a command response is pending
        # Fix: was '' (str); receive() calls .append(), so a list is required.
        self.response = []
        self.protocol = protocol
        self.protocol.addCallback(self.receive)
        # Final-response matcher for AT command results.
        self.resp_re = re.compile(
                    r'^OK|ERROR|(\+CM[ES] ERROR: \d+)|(COMMAND NOT SUPPORT)$')

    def receive(self, line):
        '''
        Handle one line from the modem: buffer it while a response is pending,
        fire the callback/errback on a final response, otherwise dispatch it
        as an unsolicited event.
        '''
        if self.wait:
            if self.resp_re.match(line):
                self.wait = False
                self.response.append(line)
                if line.startswith('ERROR'):
                    self.log.critical('error from Modem: %s' % line)
                    if self.callback:
                        self.callback.errback(self.response)
                else:
                    if self.callback:
                        self.callback.callback(self.response)
                # Fix: reset to a list (was ''), ready for the next command.
                self.response = []
                if self.callback:
                    self.callback = None
            else:
                self.response.append(line)
        elif self.event:
            self.event(line)
        else:
            self.log.debug('unmanaged message from Modem: %s' % line)

    def sendsms(self, recipient, message, callback_fct=None):
        '''
        Send an SMS via AT+CMGS, chaining the required command steps
        (ATZ -> AT+CMGF=1 -> AT+CMGS="..." -> message body + Ctrl-Z).

        :param recipient: destination number (str).
        :param message: message body (bytes).
        :param callback_fct: optional callback fired with the final response.
        '''
        def recipient_set(res):
            self.log.debug(
                'do we have > ? ==> %s' % ('OK' if res == '>' else 'No: ' + res))
            # Fix: Deferred() must be instantiated (was the bare class).
            self.callback = Deferred()
            if callback_fct:
                self.callback.addCallback(callback_fct)
            self.wait = True
            self.protocol.write(message + b'\x1a')  # Ctrl-Z terminates the SMS body

        def text_mode(res):
            self.callback = Deferred()  # fix: instantiate
            self.callback.addCallback(recipient_set)
            self.wait = True
            self.protocol.write(b'AT+CMGS="' + recipient.encode() + b'"\r')

        def modem_init(res):
            self.first = False
            self.callback = Deferred()  # fix: instantiate
            self.callback.addCallback(text_mode)
            self.wait = True
            self.protocol.write(b'AT+CMGF=1\r')

        if self.first:
            # First use: reset the modem, then continue the chain from modem_init.
            self.wait = True
            self.callback = Deferred()
            self.callback.addCallback(modem_init)
            self.protocol.write(b'ATZ\r')
        else:
            modem_init('OK')
        
                
Beispiel #4
0
class Felix(Character, NucypherTokenActor):
    """
    A NuCypher ERC20 faucet / Airdrop scheduler.

    Felix is a web application that gives NuCypher *testnet* tokens to registered addresses
    with a scheduled reduction of disbursement amounts, and an HTTP endpoint
    for handling new address registration.

    The main goal of Felix is to provide a source of testnet tokens for
    research and the development of production-ready nucypher dApps.
    """

    _default_crypto_powerups = [SigningPower]

    # Intervals
    DISTRIBUTION_INTERVAL = 60  # seconds
    DISBURSEMENT_INTERVAL = 24 * 365  # only distribute tokens to the same address once each YEAR.
    STAGING_DELAY = 10  # seconds

    # Disbursement
    BATCH_SIZE = 10  # transactions
    MULTIPLIER = Decimal(
        '0.9')  # 10% reduction of previous disbursement is 0.9
    # this is not relevant until the year of time declared above, passes.
    MINIMUM_DISBURSEMENT = int(1e18)  # NuNits (1 NU)
    ETHER_AIRDROP_AMOUNT = int(1e17)  # Wei (.1 ether)
    MAX_INDIVIDUAL_REGISTRATIONS = 3  # Registration Limit

    # Node Discovery
    LEARNING_TIMEOUT = 30  # seconds
    _SHORT_LEARNING_DELAY = 60  # seconds
    _LONG_LEARNING_DELAY = 120  # seconds
    _ROUNDS_WITHOUT_NODES_AFTER_WHICH_TO_SLOW_DOWN = 1

    # Twisted
    _CLOCK = reactor
    _AIRDROP_QUEUE = dict()  # airdrop number -> in-flight Deferred

    class NoDatabase(RuntimeError):
        """Raised when the faucet database is required but unavailable."""
        pass

    def __init__(self,
                 db_filepath: str,
                 rest_host: str,
                 rest_port: int,
                 client_password: str = None,
                 crash_on_error: bool = False,
                 distribute_ether: bool = True,
                 registry: BaseContractRegistry = None,
                 *args,
                 **kwargs):
        """
        :param db_filepath: Path to the sqlite recipient database file.
        :param rest_host: Host for the Flask registration endpoint.
        :param rest_port: Port for the Flask registration endpoint.
        :param client_password: Password to unlock the faucet account.
        :param crash_on_error: Re-raise distribution errors instead of logging.
        :param distribute_ether: Also send ETHER_AIRDROP_AMOUNT wei per disbursement.
        :param registry: Contract registry used to resolve agents and reserved addresses.
        """
        # Character
        super().__init__(registry=registry, *args, **kwargs)
        self.log = Logger(f"felix-{self.checksum_address[-6::]}")

        # Network
        self.rest_port = rest_port
        self.rest_host = rest_host
        self.rest_app = NOT_RUNNING
        self.crash_on_error = crash_on_error

        # Database
        self.db_filepath = db_filepath
        self.db = NO_DATABASE_AVAILABLE
        self.db_engine = create_engine(f'sqlite:///{self.db_filepath}',
                                       convert_unicode=True)

        # Blockchain
        transacting_power = TransactingPower(password=client_password,
                                             account=self.checksum_address,
                                             cache=True)
        self._crypto_power.consume_power_up(transacting_power)

        self.token_agent = ContractAgency.get_agent(NucypherTokenAgent,
                                                    registry=registry)
        self.blockchain = self.token_agent.blockchain
        # Addresses that must never receive faucet funds.
        self.reserved_addresses = [
            self.checksum_address, BlockchainInterface.NULL_ADDRESS
        ]

        # Update reserved addresses with deployed contracts
        existing_entries = list(registry.enrolled_addresses)
        self.reserved_addresses.extend(existing_entries)

        # Distribution
        self.__distributed = 0  # Track NU Output
        self.__airdrop = 0  # Track Batch
        self.__disbursement = 0  # Track Quantity
        self._distribution_task = LoopingCall(f=self.airdrop_tokens)
        self._distribution_task.clock = self._CLOCK
        self.start_time = NOT_RUNNING

        self.economics = EconomicsFactory.get_economics(registry=registry)
        self.MAXIMUM_DISBURSEMENT = self.economics.maximum_allowed_locked
        self.INITIAL_DISBURSEMENT = self.economics.minimum_allowed_locked * 3

        # Optionally send ether with each token transaction
        self.distribute_ether = distribute_ether
        # Banner
        self.log.info(FELIX_BANNER.format(self.checksum_address))

    def __repr__(self):
        class_name = self.__class__.__name__
        r = f'{class_name}(checksum_address={self.checksum_address}, db_filepath={self.db_filepath})'
        return r

    def make_web_app(self):
        """
        Build the Flask application, the SQLAlchemy database binding, the
        Recipient model, and the /status and /register routes.
        """
        from flask import request
        from flask_sqlalchemy import SQLAlchemy

        # WSGI/Flask Service
        short_name = bytes(self.stamp).hex()[:6]
        self.rest_app = Flask(f"faucet-{short_name}",
                              template_folder=TEMPLATES_DIR)
        self.rest_app.config[
            'SQLALCHEMY_DATABASE_URI'] = f'sqlite:///{self.db_filepath}'
        try:
            # NOTE(review): this assigns the hashlib object itself, not its
            # digest — Flask's secret_key is normally bytes/str; confirm a
            # `.digest()` call isn't missing before relying on sessions.
            self.rest_app.secret_key = sha256(
                os.environ['NUCYPHER_FELIX_DB_SECRET'].encode())  # uses envvar
        except KeyError:
            raise OSError(
                "The 'NUCYPHER_FELIX_DB_SECRET' is not set.  Export your application secret and try again."
            )

        # Database
        self.db = SQLAlchemy(self.rest_app)

        # Database Tables
        class Recipient(self.db.Model):
            """
            The one and only table in Felix's database; Used to track recipients and airdrop metadata.
            """

            __tablename__ = 'recipient'

            id = self.db.Column(self.db.Integer, primary_key=True)
            address = self.db.Column(self.db.String, nullable=False)
            joined = self.db.Column(self.db.DateTime,
                                    nullable=False,
                                    default=datetime.utcnow)
            total_received = self.db.Column(self.db.String,
                                            default='0',
                                            nullable=False)
            last_disbursement_amount = self.db.Column(self.db.String,
                                                      nullable=False,
                                                      default=0)
            last_disbursement_time = self.db.Column(self.db.DateTime,
                                                    nullable=True,
                                                    default=None)
            is_staking = self.db.Column(self.db.Boolean,
                                        nullable=False,
                                        default=False)

            def __repr__(self):
                return f'{self.__class__.__name__}(id={self.id})'

        self.Recipient = Recipient  # Bind to outer class

        # Flask decorators
        rest_app = self.rest_app

        #
        # REST Routes
        #
        @rest_app.route("/status", methods=['GET'])
        def status():
            """Return faucet population, latest disbursement, and balances as JSON."""
            with ThreadedSession(self.db_engine) as session:
                total_recipients = session.query(self.Recipient).count()
                last_recipient = session.query(self.Recipient).filter(
                    self.Recipient.last_disbursement_time.isnot(
                        None)).order_by('last_disbursement_time').first()

                last_address = last_recipient.address if last_recipient else None
                last_transaction_date = last_recipient.last_disbursement_time.isoformat(
                ) if last_recipient else None

                unfunded = session.query(self.Recipient).filter(
                    self.Recipient.last_disbursement_time.is_(None)).count()

                return json.dumps({
                    "total_recipients": total_recipients,
                    "latest_recipient": last_address,
                    "latest_disburse_date": last_transaction_date,
                    "unfunded_recipients": unfunded,
                    "state": {
                        "eth": str(self.eth_balance),
                        "NU": str(self.token_balance),
                        "address": self.checksum_address,
                        "contract_address": self.token_agent.contract_address,
                    }
                })

        @rest_app.route("/register", methods=['POST'])
        def register():
            """Handle new recipient registration via POST request."""

            new_address = (request.form.get('address')
                           or request.get_json().get('address'))

            if not new_address:
                return Response(response="no address was supplied", status=411)

            if not eth_utils.is_address(new_address):
                return Response(
                    response=
                    "an invalid ethereum address was supplied.  please ensure the address is a proper checksum.",
                    status=400)
            else:
                new_address = eth_utils.to_checksum_address(new_address)

            if new_address in self.reserved_addresses:
                return Response(
                    response=
                    "sorry, that address is reserved and cannot receive funds.",
                    status=403)

            try:
                with ThreadedSession(self.db_engine) as session:

                    existing = Recipient.query.filter_by(
                        address=new_address).all()
                    # Fix: was `>` which allowed MAX_INDIVIDUAL_REGISTRATIONS + 1
                    # registrations; `>=` enforces the declared limit.
                    if len(existing) >= self.MAX_INDIVIDUAL_REGISTRATIONS:
                        # Address already exists; Abort
                        self.log.debug(
                            f"{new_address} is already enrolled {self.MAX_INDIVIDUAL_REGISTRATIONS} times."
                        )
                        return Response(
                            response=
                            f"{new_address} requested too many times  -  Please use another address.",
                            status=409)

                    # Create the record
                    recipient = Recipient(address=new_address,
                                          joined=datetime.now())
                    session.add(recipient)
                    session.commit()

            except Exception as e:
                # Pass along exceptions to the logger
                self.log.critical(str(e))
                raise

            else:
                return Response(status=200)  # TODO

        return rest_app

    def create_tables(self) -> None:
        """Build the web app (and its models) and create all database tables."""
        self.make_web_app()
        return self.db.create_all(app=self.rest_app)

    def start(self,
              host: str,
              port: int,
              web_services: bool = True,
              distribution: bool = True,
              crash_on_error: bool = False):
        """
        Start the faucet: optionally begin token distribution and run the
        HTTP service (a blocking call when web_services is True).

        :raises RuntimeError: if Felix is already running.
        """
        self.crash_on_error = crash_on_error

        if self.start_time is not NOT_RUNNING:
            raise RuntimeError("Felix is already running.")

        self.start_time = maya.now()
        payload = {"wsgi": self.rest_app, "http_port": port}
        deployer = HendrixDeploy(action="start", options=payload)

        if distribution is True:
            self.start_distribution()

        if web_services is True:
            deployer.run()  # <-- Blocking call (Reactor)

    def start_distribution(self, now: bool = True) -> bool:
        """Start token distribution"""
        self.log.info(NU_BANNER)
        self.log.info("Starting NU Token Distribution | START")
        if self.token_balance == NU.ZERO():
            raise self.ActorError(
                f"Felix address {self.checksum_address} has 0 NU tokens.")
        self._distribution_task.start(interval=self.DISTRIBUTION_INTERVAL,
                                      now=now)
        return True

    def stop_distribution(self) -> bool:
        """Stop token distribution"""  # Fix: docstring previously said "Start"
        self.log.info("Stopping NU Token Distribution | STOP")
        self._distribution_task.stop()
        return True

    def __calculate_disbursement(self, recipient) -> int:
        """Calculate the next reward for a recipient once the are selected for distribution"""

        # Initial Reward - sets the future rates
        if recipient.last_disbursement_time is None:
            amount = self.INITIAL_DISBURSEMENT

        # Cap reached, We'll continue to leak the minimum disbursement
        elif int(recipient.total_received) >= self.MAXIMUM_DISBURSEMENT:
            amount = self.MINIMUM_DISBURSEMENT

        # Calculate the next disbursement
        else:
            amount = math.ceil(
                int(recipient.last_disbursement_amount) * self.MULTIPLIER)
            if amount < self.MINIMUM_DISBURSEMENT:
                amount = self.MINIMUM_DISBURSEMENT

        return int(amount)

    def __transfer(self, disbursement: int, recipient_address: str) -> str:
        """Perform a single token transfer transaction from one account to another."""

        # Re-unlock from cache
        self.blockchain.transacting_power.activate()

        self.__disbursement += 1
        receipt = self.token_agent.transfer(
            amount=disbursement,
            target_address=recipient_address,
            sender_address=self.checksum_address)
        txhash = receipt['transactionHash']
        if self.distribute_ether:
            ether = self.ETHER_AIRDROP_AMOUNT
            transaction = {
                'to': recipient_address,
                'from': self.checksum_address,
                'value': ether,
                'gasPrice': self.blockchain.client.gas_price
            }
            ether_txhash = self.blockchain.client.send_transaction(transaction)

            # Fix: ether hash slice was [:-6] (everything but the last 6 chars);
            # use [-6:] to log the short suffix, matching the NU hash.
            self.log.info(
                f"Disbursement #{self.__disbursement} OK | NU {txhash.hex()[-6:]} | ETH {ether_txhash.hex()[-6:]} "
                f"({str(NU(disbursement, 'NuNit'))} + {self.ETHER_AIRDROP_AMOUNT} wei) -> {recipient_address}"
            )

        else:
            # Fix: closing parenthesis was missing in this log message.
            self.log.info(
                f"Disbursement #{self.__disbursement} OK | {txhash.hex()[-6:]} |"
                f"({str(NU(disbursement, 'NuNit'))}) -> {recipient_address}")

        return txhash

    def airdrop_tokens(self):
        """
        Calculate airdrop eligibility via faucet registration
        and transfer tokens to selected recipients.
        """

        with ThreadedSession(self.db_engine) as session:
            population = session.query(self.Recipient).count()

        message = f"{population} registered faucet recipients; " \
                  f"Distributed {str(NU(self.__distributed, 'NuNit'))} since {self.start_time.slang_time()}."
        self.log.debug(message)
        if population == 0:
            return  # Abort - no recipients are registered.

        # For filtration
        since = datetime.now() - timedelta(hours=self.DISBURSEMENT_INTERVAL)

        datetime_filter = or_(self.Recipient.last_disbursement_time <= since,
                              self.Recipient.last_disbursement_time ==
                              None)  # This must be `==` not `is` (SQLAlchemy operator overload)

        with ThreadedSession(self.db_engine) as session:
            candidates = session.query(
                self.Recipient).filter(datetime_filter).all()
            if not candidates:
                self.log.info("No eligible recipients this round.")
                return

        # Discard invalid addresses, in-depth
        invalid_addresses = list()

        def siphon_invalid_entries(candidate):
            # Predicate for filter(): record and drop non-checksum addresses.
            address_is_valid = eth_utils.is_checksum_address(candidate.address)
            if not address_is_valid:
                invalid_addresses.append(candidate.address)
            return address_is_valid

        candidates = list(filter(siphon_invalid_entries, candidates))

        if invalid_addresses:
            self.log.info(
                f"{len(invalid_addresses)} invalid entries detected. Pruning database."
            )

            # TODO: Is this needed? - Invalid entries are rejected at the endpoint view.
            # Prune database of invalid records
            # with ThreadedSession(self.db_engine) as session:
            #     bad_eggs = session.query(self.Recipient).filter(self.Recipient.address in invalid_addresses).all()
            #     for egg in bad_eggs:
            #         session.delete(egg.id)
            #     session.commit()

        if not candidates:
            self.log.info("No eligible recipients this round.")
            return

        # Run the airdrop off the reactor thread; track the Deferred by airdrop number.
        d = threads.deferToThread(self.__do_airdrop, candidates=candidates)
        self._AIRDROP_QUEUE[self.__airdrop] = d
        return d

    def __do_airdrop(self, candidates: list):
        """
        Stage disbursements for all candidates, then transfer in batches of
        BATCH_SIZE, updating the database record after each transfer.
        """
        self.log.info(f"Staging Airdrop #{self.__airdrop}.")

        # Staging
        staged_disbursements = [(r, self.__calculate_disbursement(recipient=r))
                                for r in candidates]
        batches = list(
            staged_disbursements[index:index + self.BATCH_SIZE]
            for index in range(0, len(staged_disbursements), self.BATCH_SIZE))
        total_batches = len(batches)

        self.log.info("====== Staged Airdrop ======")
        for recipient, disbursement in staged_disbursements:
            self.log.info(f"{recipient.address} ... {str(disbursement)[:-18]}")
        self.log.info("==========================")

        # Staging Delay
        self.log.info(
            f"Airdrop will commence in {self.STAGING_DELAY} seconds...")
        if self.STAGING_DELAY > 3:
            time.sleep(self.STAGING_DELAY - 3)
        for i in range(3):
            time.sleep(1)
            self.log.info(f"NU Token airdrop starting in {3 - i} seconds...")

        # Slowly, in series...
        for batch, staged_disbursement in enumerate(batches, start=1):
            self.log.info(f"======= Batch #{batch} ========")

            for recipient, disbursement in staged_disbursement:

                # Perform the transfer... leaky faucet.
                self.__transfer(disbursement=disbursement,
                                recipient_address=recipient.address)
                self.__distributed += disbursement

                # Update the database record
                recipient.last_disbursement_amount = str(disbursement)
                recipient.total_received = str(
                    int(recipient.total_received) + disbursement)
                recipient.last_disbursement_time = datetime.now()

                self.db.session.add(recipient)
                self.db.session.commit()

            # end inner loop
            self.log.info(
                f"Completed Airdrop #{self.__airdrop} Batch #{batch} of {total_batches}."
            )

        # end outer loop
        now = maya.now()
        next_interval_slang = now.add(
            seconds=self.DISTRIBUTION_INTERVAL).slang_time()
        self.log.info(
            f"Completed Airdrop #{self.__airdrop}; Next airdrop is {next_interval_slang}."
        )

        del self._AIRDROP_QUEUE[self.__airdrop]
        self.__airdrop += 1
Beispiel #5
0
class Miner(NucypherTokenActor):
    """
    Ursula baseclass for blockchain operations, practically carrying a pickaxe.

    A Miner locks NU tokens in the MinerEscrow contract ("staking"),
    periodically confirms activity to remain eligible for mining rewards,
    and can mint and collect those rewards.  Methods decorated with
    ``@only_me`` require this instance to represent the local node
    (constructed with ``is_me=True``).
    """

    # Polling interval (seconds) used by the staking loop to re-check the current period.
    __current_period_sample_rate = 10

    class MinerError(NucypherTokenActor.ActorError):
        """Raised for miner-specific failures (invalid stake parameters, insufficient balance, ...)."""
        pass

    def __init__(self, is_me: bool, *args, **kwargs) -> None:
        """
        :param is_me: True when this Miner is the local node; enables a transacting
                      token agent and the staking-loop machinery.  False produces a
                      read-only "stranger" view of a remote miner.
        """
        super().__init__(*args, **kwargs)
        self.log = Logger("miner")
        self.is_me = is_me

        if is_me:
            self.token_agent = NucypherTokenAgent(blockchain=self.blockchain)

            # Staking Loop: a Twisted LoopingCall that confirms activity once per new period.
            self.__current_period = None
            self._abort_on_staking_error = True
            self._staking_task = task.LoopingCall(self._confirm_period)

        else:
            # Stranger miners cannot transact; a sentinel stands in for the token agent.
            self.token_agent = constants.STRANGER_MINER

        # Everyone!
        self.miner_agent = MinerAgent(blockchain=self.blockchain)

    #
    # Staking
    #
    @only_me
    def stake(self,
              confirm_now=False,
              resume: bool = False,
              expiration: maya.MayaDT = None,
              lock_periods: int = None,
              *args, **kwargs) -> None:

        """
        High-level staking daemon loop.

        Pass either ``lock_periods`` or ``expiration``, not both.  Unless
        ``resume`` is True, a new stake is initialized first; the periodic
        confirmation loop is then started via ``start_staking_loop``.

        :param confirm_now: If True, confirm activity for the current period immediately.
        :param resume: If True, skip stake initialization and resume an existing stake.
        :param expiration: Stake end time, converted to a period count.
        :param lock_periods: Stake duration in periods.
        :raises ValueError: If both ``lock_periods`` and ``expiration`` are supplied.
        """

        if lock_periods and expiration:
            raise ValueError("Pass the number of lock periods or an expiration MayaDT; not both.")
        if expiration:
            # NOTE(review): datetime_to_period appears to yield an absolute period
            # number, while initialize_stake uses calculate_period_duration for a
            # relative duration — confirm which semantic is intended here, since
            # __terminal_period below adds this value to the current period.
            lock_periods = datetime_to_period(expiration)

        if resume is False:
            _staking_receipts = self.initialize_stake(expiration=expiration,
                                                      lock_periods=lock_periods,
                                                      *args, **kwargs)

        # TODO: Check if this period has already been confirmed
        # TODO: Check if there is an active stake in the current period: Resume staking daemon
        # TODO: Validation and Sanity checks

        if confirm_now:
            self.confirm_activity()

        # record start time and periods
        self.__start_time = maya.now()
        self.__uptime_period = self.miner_agent.get_current_period()
        self.__terminal_period = self.__uptime_period + lock_periods
        self.__current_period = self.__uptime_period
        self.start_staking_loop()

        #
        # Daemon
        #

    @only_me
    def _confirm_period(self):
        """Staking loop body: confirm activity once whenever the period advances.

        Returns True (stopping further confirmations) once the stake has expired.
        """

        period = self.miner_agent.get_current_period()
        self.log.info("Checking for new period. Current period is {}".format(self.__current_period))  # TODO: set to debug?

        if self.__current_period != period:

            # check for stake expiration
            stake_expired = self.__current_period >= self.__terminal_period
            if stake_expired:
                self.log.info('Stake duration expired')
                return True

            self.confirm_activity()
            self.__current_period = period
            self.log.info("Confirmed activity for period {}".format(self.__current_period))

    @only_me
    def _crash_gracefully(self, failure=None):
        """
        A facility for crashing more gracefully in the event that an exception
        is unhandled in a different thread, especially inside a loop like the learning loop.
        """
        self._crashed = failure
        failure.raiseException()

    @only_me
    def handle_staking_errors(self, *args, **kwargs):
        """Errback for the staking loop: crash hard or log, per ``_abort_on_staking_error``."""
        # Twisted passes the Failure as the first positional argument.
        failure = args[0]
        if self._abort_on_staking_error:
            self.log.critical("Unhandled error during node staking.  Attempting graceful crash.")
            reactor.callFromThread(self._crash_gracefully, failure=failure)
        else:
            self.log.warn("Unhandled error during node learning: {}".format(failure.getTraceback()))

    @only_me
    def start_staking_loop(self, now=True):
        """Start the periodic confirmation task.

        :param now: Fire the first call immediately (LoopingCall semantics).
        :return: False if the loop is already running, otherwise the task's Deferred.
        """
        if self._staking_task.running:
            return False
        else:
            d = self._staking_task.start(interval=self.__current_period_sample_rate, now=now)
            d.addErrback(self.handle_staking_errors)
            self.log.info("Started staking loop")
            return d

    @property
    def is_staking(self):
        """Checks if this Miner currently has locked tokens."""
        return bool(self.locked_tokens > 0)

    @property
    def locked_tokens(self):
        """Returns the amount of tokens this miner has locked."""
        return self.miner_agent.get_locked_tokens(miner_address=self.checksum_public_address)

    @property
    def stakes(self) -> Tuple:
        """Read all live stake data from the blockchain and return it as a tuple"""
        stakes_reader = self.miner_agent.get_all_stakes(miner_address=self.checksum_public_address)
        return tuple(stakes_reader)

    @only_me
    def deposit(self, amount: int, lock_periods: int) -> Tuple[str, str]:
        """Public facing method for token locking.

        Approves the escrow contract to spend ``amount`` tokens, then deposits them.

        :return: (approve txhash, deposit txhash)
        """

        approve_txhash = self.token_agent.approve_transfer(amount=amount,
                                                           target_address=self.miner_agent.contract_address,
                                                           sender_address=self.checksum_public_address)

        deposit_txhash = self.miner_agent.deposit_tokens(amount=amount,
                                                         lock_periods=lock_periods,
                                                         sender_address=self.checksum_public_address)

        return approve_txhash, deposit_txhash

    @only_me
    def divide_stake(self,
                     stake_index: int,
                     target_value: int,
                     additional_periods: int = None,
                     expiration: maya.MayaDT = None) -> dict:
        """
        Modifies the unlocking schedule and value of already locked tokens.

        This actor requires that is_me is True, and that the expiration datetime is after the existing
        locking schedule of this miner, or an exception will be raised.

        Pass either ``additional_periods`` or ``expiration``, not both.

        :param stake_index: Index of the existing stake to divide.
        :param target_value:  The quantity of tokens in the smallest denomination.
        :param additional_periods: Extra lock periods for the divided portion.
        :param expiration: The new expiration date to set.
        :return: Returns the blockchain transaction hash

        """

        if additional_periods and expiration:
            raise ValueError("Pass the number of lock periods or an expiration MayaDT; not both.")

        _first_period, last_period, locked_value = self.miner_agent.get_stake_info(
            miner_address=self.checksum_public_address, stake_index=stake_index)
        if expiration:
            additional_periods = datetime_to_period(datetime=expiration) - last_period

            if additional_periods <= 0:
                raise self.MinerError("Expiration {} must be at least 1 period from now.".format(expiration))

        if target_value >= locked_value:
            raise self.MinerError("Cannot divide stake; Value must be less than the specified stake value.")

        # Ensure both halves are for valid amounts
        validate_stake_amount(amount=target_value)
        validate_stake_amount(amount=locked_value - target_value)

        tx = self.miner_agent.divide_stake(miner_address=self.checksum_public_address,
                                           stake_index=stake_index,
                                           target_value=target_value,
                                           periods=additional_periods)

        self.blockchain.wait_for_receipt(tx)
        return tx

    @only_me
    def __validate_stake(self, amount: int, lock_periods: int) -> bool:
        """Validate amount, locktime, and balance for a prospective stake; True if acceptable."""

        assert validate_stake_amount(amount=amount)  # TODO: remove assertions..?
        assert validate_locktime(lock_periods=lock_periods)

        if not self.token_balance >= amount:
            raise self.MinerError("Insufficient miner token balance ({balance})".format(balance=self.token_balance))
        else:
            return True

    @only_me
    def initialize_stake(self,
                         amount: int,
                         lock_periods: int = None,
                         expiration: maya.MayaDT = None,
                         entire_balance: bool = False) -> dict:
        """
        High level staking method for Miners.

        :param amount: Amount of tokens to stake denominated in the smallest unit.
        :param lock_periods: Duration of stake in periods.
        :param expiration: A MayaDT object representing the time the stake expires; used to calculate lock_periods.
        :param entire_balance: If True, stake the entire balance of this node, or the maximum possible.

        """

        if lock_periods and expiration:
            raise ValueError("Pass the number of lock periods or an expiration MayaDT; not both.")
        if entire_balance and amount:
            raise self.MinerError("Specify an amount or entire balance, not both")

        if expiration:
            lock_periods = calculate_period_duration(future_time=expiration)
        if entire_balance is True:
            amount = self.token_balance

        # NOTE(review): staking_transactions is never populated below, so this
        # method always returns an empty OrderedDict — confirm intended.
        staking_transactions = OrderedDict()  # type: OrderedDict # Time series of txhases

        # Validate
        assert self.__validate_stake(amount=amount, lock_periods=lock_periods)

        # Transact
        approve_txhash, initial_deposit_txhash = self.deposit(amount=amount, lock_periods=lock_periods)
        self._transaction_cache.append((datetime.utcnow(), initial_deposit_txhash))

        self.log.info("{} Initialized new stake: {} tokens for {} periods".format(self.checksum_public_address, amount, lock_periods))
        return staking_transactions

    #
    # Reward and Collection
    #

    @only_me
    def confirm_activity(self) -> str:
        """Miner rewarded for every confirmed period"""

        txhash = self.miner_agent.confirm_activity(node_address=self.checksum_public_address)
        self._transaction_cache.append((datetime.utcnow(), txhash))

        return txhash

    @only_me
    def mint(self) -> str:
        """Computes and transfers tokens to the miner's account.

        :return: The mint transaction hash.
        """

        mint_txhash = self.miner_agent.mint(node_address=self.checksum_public_address)
        self._transaction_cache.append((datetime.utcnow(), mint_txhash))

        return mint_txhash

    @only_me
    def collect_policy_reward(self, policy_manager):
        """Collect rewarded ETH.

        :param policy_manager: An agent exposing ``collect_policy_reward``.
        :return: The collection transaction hash.
        """

        policy_reward_txhash = policy_manager.collect_policy_reward(collector_address=self.checksum_public_address)
        self._transaction_cache.append((datetime.utcnow(), policy_reward_txhash))

        return policy_reward_txhash

    @only_me
    def collect_staking_reward(self, collector_address: str) -> str:
        """Withdraw tokens rewarded for staking.

        :param collector_address: Address that receives the withdrawn reward tokens.
        :return: The collection transaction hash.
        """

        collection_txhash = self.miner_agent.collect_staking_reward(collector_address=collector_address)
        self._transaction_cache.append((datetime.utcnow(), collection_txhash))

        return collection_txhash
# ---- Example #6 ----
class BlockchainInterface:
    """
    Interacts with a solidity compiler and a registry in order to instantiate compiled
    ethereum contracts with the given web3 provider backend.
    """

    TIMEOUT = 180  # seconds to wait for a transaction receipt
    NULL_ADDRESS = '0x' + '0' * 40

    # Sentinel indicating whether a child provider process is managed by this interface.
    process = NO_PROVIDER_PROCESS.bool_value(False)
    Web3 = Web3

    # Contract factory used for all contract instantiations (supports versioning).
    _contract_factory = VersionedContract

    class InterfaceError(Exception):
        """Base exception for blockchain interface failures."""
        pass

    class NoProvider(InterfaceError):
        """Raised when no web3 provider is configured or supplied."""
        pass

    class UnsupportedProvider(InterfaceError):
        """Raised for provider URIs with an unrecognized scheme."""
        pass

    class ConnectionFailed(InterfaceError):
        """Raised when the client cannot reach the configured provider endpoint."""
        pass

    class UnknownContract(InterfaceError):
        """Raised when the registry holds no record for a requested contract."""
        pass

    def __init__(self,
                 poa: bool = True,
                 light: bool = False,
                 provider_process: NuCypherGethProcess = NO_PROVIDER_PROCESS,
                 provider_uri: str = NO_BLOCKCHAIN_CONNECTION,
                 provider: Web3Providers = NO_BLOCKCHAIN_CONNECTION):
        """
        A blockchain "network interface"; The circumflex wraps entirely around the bounds of
        contract operations including compilation, deployment, and execution.

        TODO: #1502 - Move me to docs.

         Filesystem          Configuration           Node              Client                  EVM
        ================ ====================== =============== =====================  ===========================

         Solidity Files -- SolidityCompiler -                      --- HTTPProvider ------ ...
                                            |                    |
                                            |                    |
                                            |                    |
                                            - *BlockchainInterface* -- IPCProvider ----- External EVM (geth, parity...)
                                                       |         |
                                                       |         |
                                                 TestProvider ----- EthereumTester -------------
                                                                                                |
                                                                                                |
                                                                                        PyEVM (Development Chain)

         ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

         Runtime Files --                 --BlockchainInterface ----> Registry
                        |                |             ^
                        |                |             |
                        |                |             |
         Key Files ------ CharacterConfiguration     Agent                          ... (Contract API)
                        |                |             ^
                        |                |             |
                        |                |             |
                        |                |           Actor                          ...Blockchain-Character API)
                        |                |             ^
                        |                |             |
                        |                |             |
         Config File ---                  --------- Character                       ... (Public API)
                                                       ^
                                                       |
                                                     Human


        The Blockchain is the junction of the solidity compiler, a contract registry, and a collection of
        web3 network providers as a means of interfacing with the ethereum blockchain to execute
        or deploy contract code on the network.


        Compiler and Registry Usage
        -----------------------------

        Contracts are freshly re-compiled if an instance of SolidityCompiler is passed; otherwise,
        The registry will read contract data saved to disk that is be used to retrieve contact address and op-codes.
        Optionally, A registry instance can be passed instead.


        Provider Usage
        ---------------
        https: // github.com / ethereum / eth - tester     # available-backends


        * HTTP Provider - Web3 HTTP provider, typically JSON RPC 2.0 over HTTP
        * Websocket Provider - Web3 WS provider, typically JSON RPC 2.0 over WS, supply endpoint uri and websocket=True
        * IPC Provider - Web3 File based IPC provider transported over standard I/O
        * Custom Provider - A pre-initialized web3.py provider instance to attach to this interface

        """

        self.log = Logger('Blockchain')
        self.poa = poa
        self.provider_uri = provider_uri
        self._provider = provider
        self._provider_process = provider_process
        self.w3 = NO_BLOCKCHAIN_CONNECTION
        self.client = NO_BLOCKCHAIN_CONNECTION  # type: Web3Client
        self.transacting_power = READ_ONLY_INTERFACE
        self.is_light = light

    def __repr__(self):
        r = '{name}({uri})'.format(name=self.__class__.__name__,
                                   uri=self.provider_uri)
        return r

    @classmethod
    def from_dict(cls, payload: dict, **overrides) -> 'BlockchainInterface':
        """Alternate constructor from a ``to_dict``-shaped payload; non-None overrides win."""
        payload.update({k: v for k, v in overrides.items() if v is not None})
        blockchain = cls(**payload)
        return blockchain

    def to_dict(self) -> dict:
        """Serialize the connection configuration (inverse of ``from_dict``)."""
        payload = dict(provider_uri=self.provider_uri,
                       poa=self.poa,
                       light=self.is_light)
        return payload

    @property
    def is_connected(self) -> bool:
        """
        https://web3py.readthedocs.io/en/stable/__provider.html#examples-using-automated-detection
        """
        if self.client is NO_BLOCKCHAIN_CONNECTION:
            return False
        return self.client.is_connected

    def attach_middleware(self):
        """Install client middleware required by the configured chain type."""

        # For use with Proof-Of-Authority test-blockchains
        if self.poa is True:
            self.log.debug('Injecting POA middleware at layer 0')
            self.client.inject_middleware(geth_poa_middleware, layer=0)

    def connect(self):
        """
        Attach a provider (starting a child process if one was supplied),
        instantiate the Web3 client, and install middleware.

        :return: True if the client reports a live connection.
        :raises NoProvider: If no provider could be attached.
        :raises ConnectionFailed: If the RPC or IPC endpoint is unreachable.
        """

        # Spawn child process
        if self._provider_process:
            self._provider_process.start()
            provider_uri = self._provider_process.provider_uri(scheme='file')
        else:
            provider_uri = self.provider_uri
            self.log.info(
                f"Using external Web3 Provider '{self.provider_uri}'")

        # Attach Provider
        self._attach_provider(provider=self._provider,
                              provider_uri=provider_uri)
        self.log.info("Connecting to {}".format(self.provider_uri))
        if self._provider is NO_BLOCKCHAIN_CONNECTION:
            raise self.NoProvider(
                "There are no configured blockchain providers")

        # Connect if not connected
        try:
            self.w3 = self.Web3(provider=self._provider)
            self.client = Web3Client.from_w3(w3=self.w3)
        except requests.ConnectionError:  # RPC
            raise self.ConnectionFailed(
                f'Connection Failed - {str(self.provider_uri)} - is RPC enabled?'
            )
        except FileNotFoundError:  # IPC File Protocol
            raise self.ConnectionFailed(
                f'Connection Failed - {str(self.provider_uri)} - is IPC enabled?'
            )
        else:
            self.attach_middleware()

        return self.is_connected

    def sync(self, show_progress: bool = False) -> None:
        """Block until the client's chain sync completes.

        :param show_progress: If True, render an interactive progress bar on
                              stdout; otherwise log sync progress quietly.
        """

        sync_state = self.client.sync()
        if show_progress:
            import click
            # TODO: #1503 - It is possible that output has been redirected from a higher-level emitter.
            # TODO: #1503 - Use console logging instead of StdOutEmitter here.
            emitter = StdoutEmitter()

            emitter.echo(
                f"Syncing: {self.client.chain_name.capitalize()}. Waiting for sync to begin."
            )

            while not len(self.client.peers):
                emitter.echo("waiting for peers...")
                time.sleep(5)

            peer_count = len(self.client.peers)
            emitter.echo(
                f"Found {'an' if peer_count == 1 else peer_count} Ethereum peer{('s' if peer_count > 1 else '')}."
            )

            try:
                emitter.echo("Beginning sync...")
                initial_state = next(sync_state)
            except StopIteration:  # will occur if no syncing needs to happen
                emitter.echo("Local blockchain data is already synced.")
                return

            prior_state = initial_state
            total_blocks_to_sync = int(initial_state.get(
                'highestBlock', 0)) - int(initial_state.get('currentBlock', 0))
            with click.progressbar(length=total_blocks_to_sync,
                                   label="sync progress") as bar:
                for syncdata in sync_state:
                    if syncdata:
                        blocks_accomplished = int(
                            syncdata['currentBlock']) - int(
                                prior_state.get('currentBlock', 0))
                        bar.update(blocks_accomplished)
                        prior_state = syncdata
        else:
            try:
                for syncdata in sync_state:
                    self.client.log.info(
                        f"Syncing {syncdata['currentBlock']}/{syncdata['highestBlock']}"
                    )
            except TypeError:  # it's already synced
                return
        return

    @property
    def provider(self) -> Union[IPCProvider, WebsocketProvider, HTTPProvider]:
        return self._provider

    def _attach_provider(self,
                         provider: Web3Providers = None,
                         provider_uri: str = None) -> None:
        """
        Resolve and store a web3 provider instance, either from an explicit
        ``provider`` object or by dispatching on the ``provider_uri`` scheme.

        https://web3py.readthedocs.io/en/latest/providers.html#providers

        :raises NoProvider: If neither a provider nor a URI was supplied.
        :raises UnsupportedProvider: If the URI scheme is not recognized.
        """

        if not provider_uri and not provider:
            raise self.NoProvider("No URI or provider instances supplied.")

        if provider_uri and not provider:
            uri_breakdown = urlparse(provider_uri)

            if uri_breakdown.scheme == 'tester':
                # e.g. tester://pyevm — the backend name lives in the netloc.
                providers = {
                    'pyevm': _get_tester_pyevm,
                    'geth': _get_test_geth_parity_provider,
                    'parity-ethereum': _get_test_geth_parity_provider,
                }
                provider_scheme = uri_breakdown.netloc

            else:
                providers = {
                    'auto': _get_auto_provider,
                    'infura': _get_infura_provider,
                    'ipc': _get_IPC_provider,
                    'file': _get_IPC_provider,
                    'ws': _get_websocket_provider,
                    'http': _get_HTTP_provider,
                    'https': _get_HTTP_provider,
                }
                provider_scheme = uri_breakdown.scheme

            # auto-detect for file based ipc
            if not provider_scheme:
                if os.path.exists(provider_uri):
                    # file is available - assume ipc/file scheme
                    provider_scheme = 'file'
                    self.log.info(
                        f"Auto-detected provider scheme as 'file://' for provider {provider_uri}"
                    )

            try:
                self._provider = providers[provider_scheme](provider_uri)
            except KeyError:
                raise self.UnsupportedProvider(
                    f"{provider_uri} is an invalid or unsupported blockchain provider URI"
                )
            else:
                self.provider_uri = provider_uri or NO_BLOCKCHAIN_CONNECTION
        else:
            self._provider = provider

    @validate_checksum_address
    def send_transaction(
        self,
        contract_function: ContractFunction,
        sender_address: str,
        payload: dict = None,
        transaction_gas_limit: int = None,
    ) -> dict:
        """
        Build, sign, broadcast, and confirm a contract transaction.

        :param contract_function: The bound contract function (or constructor) to call.
        :param sender_address: Checksummed address funding the transaction.
        :param payload: Optional base transaction fields; nonce/chainId/from/gasPrice are filled in.
        :param transaction_gas_limit: Optional explicit gas limit.
        :return: The transaction receipt.
        :raises InterfaceError: If the interface is read-only, the receipt status is 0,
                                or the transaction consumed all supplied gas.
        """

        if self.transacting_power is READ_ONLY_INTERFACE:
            raise self.InterfaceError(str(READ_ONLY_INTERFACE))

        #
        # Build
        #

        if not payload:
            payload = {}

        # Use the 'pending' nonce so queued transactions are not clobbered.
        nonce = self.client.w3.eth.getTransactionCount(sender_address,
                                                       'pending')
        payload.update({
            'chainId': int(self.client.chain_id),
            'nonce': nonce,
            'from': sender_address,
            'gasPrice': self.client.gas_price
        })

        if transaction_gas_limit:
            payload['gas'] = int(transaction_gas_limit)

        # Get interface name: constructors deploy, everything else invokes.
        deployment = isinstance(contract_function, ContractConstructor)

        try:
            transaction_name = contract_function.fn_name.upper()
        except AttributeError:
            if deployment:
                transaction_name = 'DEPLOY'
            else:
                transaction_name = 'UNKNOWN'

        payload_pprint = dict(payload)
        payload_pprint['from'] = to_checksum_address(payload['from'])
        payload_pprint = ', '.join("{}: {}".format(k, v)
                                   for k, v in payload_pprint.items())
        self.log.debug(f"[TX-{transaction_name}] | {payload_pprint}")

        # Build transaction payload
        try:
            unsigned_transaction = contract_function.buildTransaction(payload)
        except (ValidationError, ValueError) as e:
            # TODO: #1504 - Handle validation failures for gas limits, invalid fields, etc.
            # Note: Geth raises ValueError in the same condition that pyevm raises ValidationError here.
            # Treat this condition as "Transaction Failed".
            error = str(e).replace("{", "{{").replace("}", "}}")  # See #724
            self.log.critical(f"Validation error: {error}")
            raise
        else:
            if deployment:
                self.log.info(
                    f"Deploying contract: {len(unsigned_transaction['data'])} bytes"
                )

        #
        # Broadcast
        #

        signed_raw_transaction = self.transacting_power.sign_transaction(
            unsigned_transaction)
        txhash = self.client.send_raw_transaction(signed_raw_transaction)

        try:
            receipt = self.client.wait_for_receipt(txhash,
                                                   timeout=self.TIMEOUT)
        except TimeExhausted:
            # TODO: #1504 - Handle transaction timeout
            raise
        else:
            self.log.debug(
                f"[RECEIPT-{transaction_name}] | txhash: {receipt['transactionHash'].hex()}"
            )

        #
        # Confirm
        #

        # Primary check: a post-Byzantium receipt carries an explicit status field.
        deployment_status = receipt.get('status', UNKNOWN_TX_STATUS)
        if deployment_status == 0:
            failure = f"Transaction transmitted, but receipt returned status code 0. " \
                      f"Full receipt: \n {pprint.pformat(receipt, indent=2)}"
            raise self.InterfaceError(failure)

        if deployment_status is UNKNOWN_TX_STATUS:
            self.log.info(
                f"Unknown transaction status for {txhash} (receipt did not contain a status field)"
            )

            # Secondary check: full gas consumption is a strong signal of a reverted transaction.
            tx = self.client.get_transaction(txhash)
            if tx["gas"] == receipt["gasUsed"]:
                raise self.InterfaceError(
                    f"Transaction consumed 100% of transaction gas. "
                    f"Full receipt: \n {pprint.pformat(receipt, indent=2)}")

        return receipt

    def get_contract_by_name(
        self,
        registry: BaseContractRegistry,
        contract_name: str,
        contract_version: str = None,
        enrollment_version: Union[int, str] = None,
        proxy_name: str = None,
        use_proxy_address: bool = True
    ) -> Union[VersionedContract, List[tuple]]:
        """
        Instantiate a deployed contract from registry data,
        and assimilate it with its proxy if it is upgradeable,
        or return all registered records if use_proxy_address is False.

        :raises UnknownContract: If no registry record (or no targeting proxy) exists.
        :raises InterfaceError: If multiple proxy deployments target the same contract.
        """
        target_contract_records = registry.search(
            contract_name=contract_name, contract_version=contract_version)

        if not target_contract_records:
            raise self.UnknownContract(
                f"No such contract records with name {contract_name}:{contract_version}."
            )

        if proxy_name:

            # Lookup proxies; Search for a published proxy that targets this contract record
            proxy_records = registry.search(contract_name=proxy_name)

            results = list()
            # NOTE: loop variable renamed from 'proxy_name' to avoid shadowing the parameter.
            for _proxy_name, proxy_version, proxy_address, proxy_abi in proxy_records:
                proxy_contract = self.client.w3.eth.contract(
                    abi=proxy_abi,
                    address=proxy_address,
                    version=proxy_version,
                    ContractFactoryClass=self._contract_factory)

                # Read this dispatcher's target address from the blockchain
                proxy_live_target_address = proxy_contract.functions.target(
                ).call()
                for target_name, target_version, target_address, target_abi in target_contract_records:

                    if target_address == proxy_live_target_address:
                        if use_proxy_address:
                            triplet = (proxy_address, target_version,
                                       target_abi)
                        else:
                            triplet = (target_address, target_version,
                                       target_abi)
                    else:
                        continue

                    results.append(triplet)

            if len(results) > 1:
                address, _version, _abi = results[0]
                # Fix: the message is fully formatted here; the original applied a
                # second no-op .format(contract_name) to a placeholder-free string.
                message = "Multiple {} deployments are targeting {}".format(
                    proxy_name, address)
                raise self.InterfaceError(message)

            else:
                try:
                    selected_address, selected_version, selected_abi = results[
                        0]
                except IndexError:
                    raise self.UnknownContract(
                        f"There are no Dispatcher records targeting '{contract_name}':{contract_version}"
                    )

        else:
            # NOTE: 0 must be allowed as a valid version number
            if len(target_contract_records) != 1:
                if enrollment_version is None:
                    m = f"{len(target_contract_records)} records enrolled " \
                        f"for contract {contract_name}:{contract_version} " \
                        f"and no version index was supplied."
                    raise self.InterfaceError(m)
                enrollment_version = self.__get_enrollment_version_index(
                    name=contract_name,
                    contract_version=contract_version,
                    version_index=enrollment_version,
                    enrollments=len(target_contract_records))

            else:
                enrollment_version = -1  # default

            _contract_name, selected_version, selected_address, selected_abi = target_contract_records[
                enrollment_version]

        # Create the contract from selected sources
        unified_contract = self.client.w3.eth.contract(
            abi=selected_abi,
            address=selected_address,
            version=selected_version,
            ContractFactoryClass=self._contract_factory)

        return unified_contract

    @staticmethod
    def __get_enrollment_version_index(version_index: Union[int, str],
                                       enrollments: int, name: str,
                                       contract_version: str):
        """Resolve a version index from 'latest'/'earliest' aliases or an integer string.

        :raises ValueError: If the index is neither a known alias nor a valid
                            integer within the number of enrollments.
        """
        version_names = {'latest': -1, 'earliest': 0}
        try:
            version = version_names[version_index]
        except KeyError:
            try:
                version = int(version_index)
            except ValueError:
                what_is_this = version_index
                raise ValueError(
                    f"'{what_is_this}' is not a valid enrollment version number"
                )
            else:
                if version > enrollments - 1:
                    message = f"Version index '{version}' is larger than the number of enrollments " \
                              f"for {name}:{contract_version}."
                    raise ValueError(message)
        return version
# ---- Example #7 ----
class Deployer(NucypherTokenActor):
    """
    Administrative actor that deploys the NuCypher contract suite (token,
    miner escrow, policy manager, mining adjudicator, user escrow proxy) and
    per-beneficiary user escrow contracts to a blockchain.
    """

    # Registry of deployer classes
    # NOTE(review): this class-level tuple is shadowed by the instance
    # attribute `self.deployers` (a name -> method dict) assigned in __init__.
    deployers = (
        NucypherTokenDeployer,
        MinerEscrowDeployer,
        PolicyManagerDeployer,
        MiningAdjudicatorDeployer,
        UserEscrowProxyDeployer,
    )

    # Registry contract names of every known agent subclass.
    contract_names = tuple(a.registry_contract_name
                           for a in EthereumContractAgent.__subclasses__())

    __interface_class = BlockchainDeployerInterface

    def __init__(self,
                 blockchain: Blockchain,
                 deployer_address: str = None,
                 bare: bool = True) -> None:
        """
        :param blockchain: Connected blockchain wrapper used for all deployments.
        :param deployer_address: Checksum address that signs deployment
                                 transactions; may also be set later via the
                                 `deployer_address` property.
        :param bare: When False, eagerly construct agents for contracts that
                     are already deployed and enrolled in the registry.
        """

        self.blockchain = blockchain
        self.__deployer_address = NO_DEPLOYER_ADDRESS
        if deployer_address:
            self.deployer_address = deployer_address

        if not bare:
            # Agents require their contracts to already exist on-chain.
            self.token_agent = NucypherTokenAgent(blockchain=blockchain)
            self.miner_agent = MinerAgent(blockchain=blockchain)
            self.policy_agent = PolicyAgent(blockchain=blockchain)
            self.adjudicator_agent = MiningAdjudicatorAgent(
                blockchain=blockchain)

        # principal contract address -> UserEscrowDeployer,
        # populated by deploy_user_escrow().
        self.user_escrow_deployers = dict()

        # Dispatch table: contract name -> bound deployment method.
        self.deployers = {
            NucypherTokenDeployer.contract_name:
            self.deploy_token_contract,
            MinerEscrowDeployer.contract_name:
            self.deploy_miner_contract,
            PolicyManagerDeployer.contract_name:
            self.deploy_policy_contract,
            UserEscrowProxyDeployer.contract_name:
            self.deploy_escrow_proxy,
            MiningAdjudicatorDeployer.contract_name:
            self.deploy_mining_adjudicator_contract,
        }

        self.log = Logger("Deployment-Actor")

    def __repr__(self):
        r = '{name}({blockchain}, {deployer_address})'.format(
            name=self.__class__.__name__,
            blockchain=self.blockchain,
            deployer_address=self.deployer_address)
        return r

    @classmethod
    def from_blockchain(cls,
                        provider_uri: str,
                        registry=None,
                        *args,
                        **kwargs):
        """Alternate constructor: connect to a provider URI and build a
        Deployer bound to the resulting blockchain."""
        blockchain = Blockchain.connect(provider_uri=provider_uri,
                                        registry=registry)
        instance = cls(blockchain=blockchain, *args, **kwargs)
        return instance

    @property
    def deployer_address(self):
        # Delegates to the blockchain interface's deployer address.
        return self.blockchain.interface.deployer_address

    @deployer_address.setter
    def deployer_address(self, value):
        """Used for validated post-init setting of deployer's address"""
        self.blockchain.interface.deployer_address = value

    @property
    def token_balance(self) -> NU:
        """Token balance of the deployer address; raises if the token
        contract has not been deployed yet.

        NOTE(review): when token_agent is the CONTRACT_NOT_DEPLOYED sentinel,
        the message below reads `.contract_name` from it — confirm the
        sentinel tolerates attribute access.
        """
        if self.token_agent is CONTRACT_NOT_DEPLOYED:
            message = f"{self.token_agent.contract_name} contract is not deployed, or the registry has missing records."
            raise self.ActorError(message)
        return super().token_balance

    def deploy_token_contract(self) -> dict:
        """Deploy the NuCypher token contract and bind its agent."""
        token_deployer = NucypherTokenDeployer(
            blockchain=self.blockchain, deployer_address=self.deployer_address)
        txhashes = token_deployer.deploy()
        self.token_agent = token_deployer.make_agent()
        return txhashes

    def deploy_miner_contract(self, secret: bytes) -> dict:
        """Deploy the miner escrow contract; the plaintext upgrade secret is
        keccak-hashed before being passed to the deployer."""
        secret = self.blockchain.interface.w3.keccak(secret)
        miner_escrow_deployer = MinerEscrowDeployer(
            blockchain=self.blockchain,
            deployer_address=self.deployer_address,
            secret_hash=secret)

        txhashes = miner_escrow_deployer.deploy()
        self.miner_agent = miner_escrow_deployer.make_agent()
        return txhashes

    def deploy_policy_contract(self, secret: bytes) -> dict:
        """Deploy the policy manager contract (upgrade secret is keccak-hashed)."""
        secret = self.blockchain.interface.w3.keccak(secret)
        policy_manager_deployer = PolicyManagerDeployer(
            blockchain=self.blockchain,
            deployer_address=self.deployer_address,
            secret_hash=secret)

        txhashes = policy_manager_deployer.deploy()
        self.policy_agent = policy_manager_deployer.make_agent()
        return txhashes

    def deploy_mining_adjudicator_contract(self, secret: bytes) -> dict:
        """Deploy the mining adjudicator contract (upgrade secret is keccak-hashed)."""
        secret = self.blockchain.interface.w3.keccak(secret)
        mining_adjudicator_deployer = MiningAdjudicatorDeployer(
            blockchain=self.blockchain,
            deployer_address=self.deployer_address,
            secret_hash=secret)

        txhashes = mining_adjudicator_deployer.deploy()
        self.adjudicator_agent = mining_adjudicator_deployer.make_agent()
        return txhashes

    def deploy_escrow_proxy(self, secret: bytes) -> dict:
        """Deploy the user escrow proxy contract (upgrade secret is
        keccak-hashed). No agent is created for the proxy."""
        secret = self.blockchain.interface.w3.keccak(secret)
        escrow_proxy_deployer = UserEscrowProxyDeployer(
            blockchain=self.blockchain,
            deployer_address=self.deployer_address,
            secret_hash=secret)

        txhashes = escrow_proxy_deployer.deploy()
        return txhashes

    def deploy_user_escrow(
            self,
            allocation_registry: AllocationRegistry) -> UserEscrowDeployer:
        """Deploy a single user escrow contract and cache its deployer,
        keyed by the new contract's (principal) address."""
        user_escrow_deployer = UserEscrowDeployer(
            blockchain=self.blockchain,
            deployer_address=self.deployer_address,
            allocation_registry=allocation_registry)

        user_escrow_deployer.deploy()
        principal_address = user_escrow_deployer.contract.address
        self.user_escrow_deployers[principal_address] = user_escrow_deployer
        return user_escrow_deployer

    def deploy_network_contracts(
            self, miner_secret: bytes, policy_secret: bytes,
            adjudicator_secret: bytes) -> Tuple[dict, dict]:
        """
        Musketeers, if you will; Deploy the "big three" contracts to the blockchain.

        Returns a pair of dicts keyed by contract name: transaction hashes of
        each deployment, and the agents created for the deployed contracts.
        """
        token_txhashes = self.deploy_token_contract()
        miner_txhashes = self.deploy_miner_contract(secret=miner_secret)
        policy_txhashes = self.deploy_policy_contract(secret=policy_secret)
        adjudicator_txhashes = self.deploy_mining_adjudicator_contract(
            secret=adjudicator_secret)

        txhashes = {
            NucypherTokenDeployer.contract_name: token_txhashes,
            MinerEscrowDeployer.contract_name: miner_txhashes,
            PolicyManagerDeployer.contract_name: policy_txhashes,
            MiningAdjudicatorDeployer.contract_name: adjudicator_txhashes
        }

        agents = {
            NucypherTokenDeployer.contract_name: self.token_agent,
            MinerEscrowDeployer.contract_name: self.miner_agent,
            PolicyManagerDeployer.contract_name: self.policy_agent,
            MiningAdjudicatorDeployer.contract_name: self.adjudicator_agent
        }

        return txhashes, agents

    def deploy_beneficiary_contracts(
        self,
        allocations: List[Dict[str, Union[str, int]]],
        allocation_outfile: str = None,
        allocation_registry: AllocationRegistry = None,
        crash_on_failure: bool = True,
    ) -> Dict[str, dict]:
        """
        Deploy a user escrow contract per allocation and deliver the tokens.

        :param allocations: List of dicts with 'address', 'amount' and
                            'duration' keys (see example below).
        :param allocation_outfile: Path for a fresh allocation registry;
                                   mutually exclusive with allocation_registry.
        :param allocation_registry: Existing registry to enroll escrows into.
        :param crash_on_failure: When True, re-raise the first failed
                                 transaction; otherwise log and continue.
        :return: Mapping of beneficiary address -> delivery transaction hashes.

        Example allocation dataset (one year is 31540000 seconds):

        data = [{'address': '0xdeadbeef', 'amount': 100, 'duration': 31540000},
                {'address': '0xabced120', 'amount': 133432, 'duration': 31540000*2},
                {'address': '0xf7aefec2', 'amount': 999, 'duration': 31540000*3}]
        """
        if allocation_registry and allocation_outfile:
            raise self.ActorError(
                "Pass either allocation registry or allocation_outfile, not both."
            )
        if allocation_registry is None:
            allocation_registry = AllocationRegistry(
                registry_filepath=allocation_outfile)

        allocation_txhashes, failed = dict(), list()
        for allocation in allocations:
            deployer = self.deploy_user_escrow(
                allocation_registry=allocation_registry)

            try:
                txhashes = deployer.deliver(
                    value=allocation['amount'],
                    duration=allocation['duration'],
                    beneficiary_address=allocation['address'])
            except TransactionFailed:
                if crash_on_failure:
                    raise
                # Best-effort mode: record the failure and keep going.
                self.log.debug(
                    f"Failed allocation transaction for {allocation['amount']} to {allocation['address']}"
                )
                failed.append(allocation)
                continue

            else:
                allocation_txhashes[allocation['address']] = txhashes

        if failed:
            # TODO: More with these failures: send to isolated logfile, and reattempt
            self.log.critical(
                f"FAILED TOKEN ALLOCATION - {len(failed)} Allocations failed.")

        return allocation_txhashes

    @staticmethod
    def __read_allocation_data(filepath: str) -> list:
        """Load and JSON-decode an allocation data file.

        NOTE(review): the except/raise below is a no-op passthrough; the
        JSONDecodeError propagates either way.
        """
        with open(filepath, 'r') as allocation_file:
            data = allocation_file.read()
            try:
                allocation_data = json.loads(data)
            except JSONDecodeError:
                raise
        return allocation_data

    def deploy_beneficiaries_from_file(self,
                                       allocation_data_filepath: str,
                                       allocation_outfile: str = None) -> dict:
        """Read allocations from a JSON file and deploy beneficiary contracts
        for each entry (see deploy_beneficiary_contracts)."""

        allocations = self.__read_allocation_data(
            filepath=allocation_data_filepath)
        txhashes = self.deploy_beneficiary_contracts(
            allocations=allocations, allocation_outfile=allocation_outfile)
        return txhashes
# Beispiel #8
# 0
class EthereumContractRegistry:
    """
    Records known contracts on the disk for future access and utility. This
    lazily writes to the filesystem during contract enrollment.

    Records are stored as JSON triples: [name, address, abi].

    WARNING: Unless you are developing NuCypher, you most likely won't ever need
    to use this.
    """

    # True -> the registry file holds a list of records; False -> a single dict.
    _multi_contract = True
    _contract_name = NotImplemented

    _default_registry_filepath = os.path.join(DEFAULT_CONFIG_ROOT,
                                              'contract_registry.json')

    # NOTE(review): looks like the real GitHub user/org was redacted from this
    # constant — verify before relying on remote publication downloads.
    __PUBLICATION_USER = "******"
    __PUBLICATION_REPO = f"{__PUBLICATION_USER}/ethereum-contract-registry"

    # Registry
    REGISTRY_NAME = 'contract_registry.json'

    class RegistryError(Exception):
        """Base class for registry failures."""

    class RegistrySourceUnavailable(RegistryError):
        """Raised when the remote registry publication cannot be fetched."""

    class EmptyRegistry(RegistryError):
        """Raised when the registry exists but holds no records."""

    class NoRegistry(RegistryError):
        """Raised when no registry file exists at the configured filepath."""

    class UnknownContract(RegistryError):
        """Raised when a searched contract is not present in the registry."""

    class IllegalRegistry(RegistryError):
        """Raised when invalid data is encountered in the registry"""

    def __init__(self, registry_filepath: str = None) -> None:
        """
        :param registry_filepath: Location of the registry JSON file; falls
                                  back to the class default when omitted.
        """
        self.log = Logger("registry")
        self.__filepath = registry_filepath or self._default_registry_filepath

    @classmethod
    def _get_registry_class(cls, local=False):
        """
        If "local" is True, it means we are running a local blockchain and we
        have deployed the Nucypher contracts on that blockchain, therefore
        we do not want to download a registry from github.
        """
        return LocalEthereumContractRegistry if local else cls

    @classmethod
    def download_latest_publication(cls,
                                    filepath: str = None,
                                    branch: str = 'goerli') -> str:
        """
        Get the latest published contract registry from github and save it on the local file system.

        :param filepath: Destination path; defaults to the class default.
        :param branch: Git branch of the publication repository to fetch.
        :return: The filepath the registry was written to.
        :raises RegistrySourceUnavailable: on any non-200 HTTP response.
        """

        # Setup
        github_endpoint = f'https://raw.githubusercontent.com/{cls.__PUBLICATION_REPO}/{branch}/{cls.REGISTRY_NAME}'
        response = requests.get(github_endpoint)

        # Fetch
        if response.status_code != 200:
            error = f"Failed to fetch registry from {github_endpoint} with status code {response.status_code}"
            raise cls.RegistrySourceUnavailable(error)

        # Get filename
        # TODO : Use envvar for config root and registry path
        filepath = filepath or cls._default_registry_filepath

        # Ensure parent path exists
        os.makedirs(abspath(dirname(filepath)), exist_ok=True)

        # Write registry
        with open(filepath, 'wb') as registry_file:
            registry_file.write(response.content)

        return filepath

    @classmethod
    def from_latest_publication(
            cls,
            filepath: str = None,
            branch: str = 'goerli') -> 'EthereumContractRegistry':
        """Download the latest published registry and return an instance
        bound to the downloaded file."""
        filepath = cls.download_latest_publication(filepath=filepath,
                                                   branch=branch)
        instance = cls(registry_filepath=filepath)
        return instance

    @property
    def filepath(self):
        """Absolute path of the backing registry file."""
        return self.__filepath

    @property
    def enrolled_names(self):
        """Iterator over the contract names of all enrolled records."""
        entries = iter(record[0] for record in self.read())
        return entries

    @property
    def enrolled_addresses(self):
        """Iterator over the contract addresses of all enrolled records."""
        entries = iter(record[1] for record in self.read())
        return entries

    def _swap_registry(self, filepath: str) -> bool:
        """Point this instance at a different registry file."""
        self.__filepath = filepath
        return True

    def _destroy(self) -> None:
        """Delete the backing registry file from disk."""
        os.remove(self.filepath)

    def write(self, registry_data: list) -> None:
        """
        Writes the registry data list as JSON to the registry file. If no
        file exists, it will create it and write the data. If a file does exist
        it will _overwrite_ everything in it.
        """
        with open(self.__filepath, 'w+') as registry_file:
            registry_file.seek(0)
            registry_file.write(json.dumps(registry_data))
            registry_file.truncate()

    def read(self) -> Union[list, dict]:
        """
        Reads the registry file and parses the JSON and returns a list.
        If the file is empty it will return an empty list.
        If you are modifying or updating the registry file, you _must_ call
        this function first to get the current state to append to the dict or
        modify it because _write_registry_file overwrites the file.

        :raises NoRegistry: if no file exists at the configured filepath.
        :raises RegistryError: if the file contains invalid JSON.
        """
        try:
            with open(self.filepath, 'r') as registry_file:
                self.log.debug("Reading from registrar: filepath {}".format(
                    self.filepath))
                registry_file.seek(0)
                file_data = registry_file.read()
                if file_data:
                    try:
                        registry_data = json.loads(file_data)
                    except JSONDecodeError:
                        raise self.RegistryError(
                            f"Registry contains invalid JSON at '{self.__filepath}'"
                        )
                else:
                    # An empty file yields an empty collection of the expected shape.
                    registry_data = list() if self._multi_contract else dict()

        except FileNotFoundError:
            raise self.NoRegistry("No registry at filepath: {}".format(
                self.filepath))

        return registry_data

    def enroll(self, contract_name, contract_address, contract_abi):
        """
        Enrolls a contract to the chain registry by writing the name, address,
        and abi information to the filesystem as JSON.

        Note: Unless you are developing NuCypher, you most likely won't ever
        need to use this.
        """
        contract_data = [contract_name, contract_address, contract_abi]
        try:
            registry_data = self.read()
        except self.RegistryError:
            # Missing or unreadable registry: start a fresh one.
            self.log.info("Blank registry encountered: enrolling {}:{}".format(
                contract_name, contract_address))
            registry_data = list()  # empty registry

        registry_data.append(contract_data)
        self.write(registry_data)
        self.log.info("Enrolled {}:{} into registry {}".format(
            contract_name, contract_address, self.filepath))

    def search(self, contract_name: str = None, contract_address: str = None):
        """
        Searches the registry for a contract with the provided name or address
        and returns the contracts component data.

        Exactly one of contract_name or contract_address must be given.
        Returns a list of matching records for a name search, or the single
        record for an address search.

        :raises UnknownContract: if no record matches.
        :raises IllegalRegistry: on malformed records or duplicate addresses.
        """
        if not (bool(contract_name) ^ bool(contract_address)):
            raise ValueError(
                "Pass contract_name or contract_address, not both.")

        contracts = list()
        registry_data = self.read()

        try:
            for name, addr, abi in registry_data:
                if contract_name == name or contract_address == addr:
                    contracts.append((name, addr, abi))
        except ValueError:
            # A record did not unpack into (name, address, abi).
            # Fix: the original built this message with str.format but no
            # replacement field, silently dropping the filepath.
            message = f"Missing or corrupted registry data at '{self.__filepath}'"
            self.log.critical(message)
            raise self.IllegalRegistry(message)

        if not contracts:
            raise self.UnknownContract(contract_name)

        if contract_address and len(contracts) > 1:
            # Fix: format the message before logging it (the original logged
            # the raw template with the '{}' placeholder).
            m = "Multiple records returned for address {}".format(
                contract_address)
            self.log.critical(m)
            raise self.IllegalRegistry(m)

        return contracts if contract_name else contracts[0]
# Beispiel #9
# 0
class Felix(Character, NucypherTokenActor):
    """
    A NuCypher ERC20 faucet / Airdrop scheduler.

    Felix is a web application that gives NuCypher *testnet* tokens to registered addresses
    with a scheduled reduction of disbursement amounts, and an HTTP endpoint
    for handling new address registration.

    The main goal of Felix is to provide a source of testnet tokens for
    research and the development of production-ready nucypher dApps.
    """

    _default_crypto_powerups = [SigningPower]  # identity only

    DISTRIBUTION_INTERVAL = 60 * 60  # seconds (60*60=1Hr)
    DISBURSEMENT_INTERVAL = HOURS_PER_PERIOD  # (24) hours
    STAGING_DELAY = 10  # seconds

    BATCH_SIZE = 10  # transactions
    MULTIPLIER = 0.95  # 5% reduction of previous stake is 0.95, for example
    MAXIMUM_DISBURSEMENT = MAX_ALLOWED_LOCKED  # NuNits
    INITIAL_DISBURSEMENT = MIN_ALLOWED_LOCKED  # NuNits
    MINIMUM_DISBURSEMENT = 1e18  # NuNits
    # TRANSACTION_GAS = 40000                    # gas  TODO

    TEMPLATE_NAME = 'felix.html'

    # Node Discovery
    LEARNING_TIMEOUT = 30  # seconds
    _SHORT_LEARNING_DELAY = 60  # seconds
    _LONG_LEARNING_DELAY = 120  # seconds
    _ROUNDS_WITHOUT_NODES_AFTER_WHICH_TO_SLOW_DOWN = 1

    # Twisted
    _CLOCK = reactor
    # Class-level map of airdrop number -> pending Deferred (shared by design).
    _AIRDROP_QUEUE = dict()

    class NoDatabase(RuntimeError):
        """Raised when the recipient database is required but unavailable."""

    def __init__(self,
                 db_filepath: str,
                 rest_host: str,
                 rest_port: int,
                 crash_on_error: bool = False,
                 *args,
                 **kwargs):
        """
        :param db_filepath: Path of the SQLite recipient database file.
        :param rest_host: Interface for the faucet web application.
        :param rest_port: Port for the faucet web application.
        :param crash_on_error: When True, re-raise distribution errors instead
                               of logging and continuing.
        """

        # Character
        super().__init__(*args, **kwargs)
        self.log = Logger(f"felix-{self.checksum_public_address[-6::]}")

        # Network
        self.rest_port = rest_port
        self.rest_host = rest_host
        self.rest_app = NOT_RUNNING
        self.crash_on_error = crash_on_error

        # Database
        self.db_filepath = db_filepath
        self.db = NO_DATABASE_AVAILABLE
        self.db_engine = create_engine(f'sqlite:///{self.db_filepath}',
                                       convert_unicode=True)

        # Blockchain
        self.token_agent = NucypherTokenAgent(blockchain=self.blockchain)
        # Never disburse to the faucet itself or the null address.
        self.reserved_addresses = [self.checksum_public_address, NULL_ADDRESS]

        # Update reserved addresses with deployed contracts
        existing_entries = list(
            self.blockchain.interface.registry.enrolled_addresses)
        self.reserved_addresses.extend(existing_entries)

        # Distribution
        self.__distributed = 0  # Track NU Output
        self.__airdrop = 0  # Track Batch
        self.__disbursement = 0  # Track Quantity
        self._distribution_task = LoopingCall(f=self.airdrop_tokens)
        self._distribution_task.clock = self._CLOCK
        self.start_time = NOT_RUNNING

        # Banner
        self.log.info(FELIX_BANNER.format(self.checksum_public_address))

    def __repr__(self):
        class_name = self.__class__.__name__
        r = f'{class_name}(checksum_address={self.checksum_public_address}, db_filepath={self.db_filepath})'
        return r

    def make_web_app(self):
        """Build and return the Flask application serving the faucet UI and
        registration endpoint; also binds the SQLAlchemy database and the
        Recipient model to this instance."""
        from flask import request
        from flask_sqlalchemy import SQLAlchemy

        # WSGI/Flask Service
        short_name = bytes(self.stamp).hex()[:6]
        self.rest_app = Flask(f"faucet-{short_name}",
                              template_folder=TEMPLATES_DIR)

        # Flask Settings
        self.rest_app.config[
            'SQLALCHEMY_DATABASE_URI'] = f'sqlite:///{self.db_filepath}'
        self.rest_app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False

        try:
            # NOTE(review): sha256(...) yields a hash *object*, not bytes —
            # Flask expects SECRET_KEY to be bytes/str; confirm this is
            # intentional (e.g. `.digest()` may have been intended).
            self.rest_app.secret_key = sha256(
                os.environ['NUCYPHER_FELIX_DB_SECRET'].encode())  # uses envvar
        except KeyError:
            raise OSError(
                "The 'NUCYPHER_FELIX_DB_SECRET' is not set.  Export your application secret and try again."
            )

        # Database
        self.db = SQLAlchemy(self.rest_app)

        # Database Tables
        class Recipient(self.db.Model):
            """
            The one and only table in Felix's database; Used to track recipients and airdrop metadata.
            """

            __tablename__ = 'recipient'

            id = self.db.Column(self.db.Integer, primary_key=True)
            address = self.db.Column(self.db.String,
                                     unique=True,
                                     nullable=False)
            joined = self.db.Column(self.db.DateTime,
                                    nullable=False,
                                    default=datetime.utcnow)
            total_received = self.db.Column(self.db.String,
                                            default='0',
                                            nullable=False)
            last_disbursement_amount = self.db.Column(self.db.String,
                                                      nullable=False,
                                                      default=0)
            last_disbursement_time = self.db.Column(self.db.DateTime,
                                                    nullable=True,
                                                    default=None)
            is_staking = self.db.Column(self.db.Boolean,
                                        nullable=False,
                                        default=False)

            def __repr__(self):
                return f'{self.__class__.__name__}(id={self.id})'

        self.Recipient = Recipient  # Bind to outer class

        # Flask decorators
        rest_app = self.rest_app
        limiter = Limiter(self.rest_app,
                          key_func=get_remote_address,
                          headers_enabled=True)

        #
        # REST Routes
        #

        @rest_app.route("/", methods=['GET'])
        @limiter.limit("100/day;20/hour;1/minute")
        def home():
            rendering = render_template(self.TEMPLATE_NAME)
            return rendering

        @rest_app.route("/register", methods=['POST'])
        @limiter.limit("5 per day")
        def register():
            """Handle new recipient registration via POST request."""
            try:
                new_address = request.form['address']
            except KeyError:
                return Response(status=400)  # TODO

            if not eth_utils.is_checksum_address(new_address):
                return Response(status=400)  # TODO

            if new_address in self.reserved_addresses:
                return Response(status=400)  # TODO

            try:
                with ThreadedSession(self.db_engine) as session:

                    existing = Recipient.query.filter_by(
                        address=new_address).all()
                    if existing:
                        # Address already exists; Abort
                        self.log.debug(f"{new_address} is already enrolled.")
                        return Response(status=400)

                    # Create the record
                    recipient = Recipient(address=new_address,
                                          joined=datetime.now())
                    session.add(recipient)
                    session.commit()

            except Exception as e:
                # Pass along exceptions to the logger
                self.log.critical(str(e))
                raise

            else:
                return Response(status=200)  # TODO

        return rest_app

    def create_tables(self) -> None:
        """Create all database tables for the bound Flask app."""
        return self.db.create_all(app=self.rest_app)

    def start(self,
              host: str,
              port: int,
              web_services: bool = True,
              distribution: bool = True,
              crash_on_error: bool = False):
        """Start the faucet: optionally the distribution loop and the web
        service (the latter blocks on the twisted reactor)."""

        self.crash_on_error = crash_on_error

        if self.start_time is not NOT_RUNNING:
            raise RuntimeError("Felix is already running.")

        self.start_time = maya.now()
        payload = {"wsgi": self.rest_app, "http_port": port}
        deployer = HendrixDeploy(action="start", options=payload)
        click.secho(f"Running {self.__class__.__name__} on {host}:{port}")

        if distribution is True:
            self.start_distribution()

        if web_services is True:
            deployer.run()  # <-- Blocking call (Reactor)

    def start_distribution(self, now: bool = True) -> bool:
        """Start token distribution"""
        self.log.info(NU_BANNER)
        self.log.info("Starting NU Token Distribution | START")
        self._distribution_task.start(interval=self.DISTRIBUTION_INTERVAL,
                                      now=now)
        return True

    def stop_distribution(self) -> bool:
        """Stop token distribution"""
        # Fix: docstring previously said "Start" (copy-paste error).
        self.log.info("Stopping NU Token Distribution | STOP")
        self._distribution_task.stop()
        return True

    def __calculate_disbursement(self, recipient) -> int:
        """Calculate the next reward for a recipient once the are selected for distribution"""

        # Initial Reward - sets the future rates
        if recipient.last_disbursement_time is None:
            amount = self.INITIAL_DISBURSEMENT

        # Cap reached, We'll continue to leak the minimum disbursement
        elif int(recipient.total_received) >= self.MAXIMUM_DISBURSEMENT:
            amount = self.MINIMUM_DISBURSEMENT

        # Calculate the next disbursement
        else:
            amount = math.ceil(
                int(recipient.last_disbursement_amount) * self.MULTIPLIER)
            if amount < self.MINIMUM_DISBURSEMENT:
                amount = self.MINIMUM_DISBURSEMENT

        return int(amount)

    def __transfer(self, disbursement: int, recipient_address: str) -> str:
        """Perform a single token transfer transaction from one account to another."""

        self.__disbursement += 1
        txhash = self.token_agent.transfer(
            amount=disbursement,
            target_address=recipient_address,
            sender_address=self.checksum_public_address)

        self.log.info(
            f"Disbursement #{self.__disbursement} OK | {txhash.hex()[-6:]} | "
            f"({str(NU(disbursement, 'NuNit'))}) -> {recipient_address}")
        return txhash

    def airdrop_tokens(self):
        """
        Calculate airdrop eligibility via faucet registration
        and transfer tokens to selected recipients.

        Returns the Deferred for the background airdrop, or None when there
        is nothing to do this round.
        """

        with ThreadedSession(self.db_engine) as session:
            population = session.query(self.Recipient).count()

        message = f"{population} registered faucet recipients; " \
                  f"Distributed {str(NU(self.__distributed, 'NuNit'))} since {self.start_time.slang_time()}."
        self.log.debug(message)
        # Fix: was `population is 0` — identity comparison with an int
        # literal is implementation-dependent; use equality.
        if population == 0:
            return  # Abort - no recipients are registered.

        # For filtration
        since = datetime.now() - timedelta(hours=self.DISBURSEMENT_INTERVAL)

        # NOTE: `== None` is intentional — SQLAlchemy overloads it to emit
        # "IS NULL"; `is None` would not work here.
        datetime_filter = or_(self.Recipient.last_disbursement_time <= since,
                              self.Recipient.last_disbursement_time == None)

        with ThreadedSession(self.db_engine) as session:
            candidates = session.query(
                self.Recipient).filter(datetime_filter).all()
            if not candidates:
                self.log.info("No eligible recipients this round.")
                return

        # Discard invalid addresses, in-depth
        invalid_addresses = list()

        def siphon_invalid_entries(candidate):
            address_is_valid = eth_utils.is_checksum_address(candidate.address)
            if not address_is_valid:
                invalid_addresses.append(candidate.address)
            return address_is_valid

        candidates = list(filter(siphon_invalid_entries, candidates))

        if invalid_addresses:
            self.log.info(
                f"{len(invalid_addresses)} invalid entries detected. Pruning database."
            )

            # TODO: Is this needed? - Invalid entries are rejected at the endpoint view.
            # Prune database of invalid records
            # with ThreadedSession(self.db_engine) as session:
            #     bad_eggs = session.query(self.Recipient).filter(self.Recipient.address in invalid_addresses).all()
            #     for egg in bad_eggs:
            #         session.delete(egg.id)
            #     session.commit()

        if not candidates:
            self.log.info("No eligible recipients this round.")
            return

        # Run the actual transfers off the reactor thread.
        d = threads.deferToThread(self.__do_airdrop, candidates=candidates)
        self._AIRDROP_QUEUE[self.__airdrop] = d
        return d

    def __do_airdrop(self, candidates: list):
        """Stage, announce, and execute one airdrop over `candidates`,
        transferring tokens in batches and updating the database records."""

        self.log.info(f"Staging Airdrop #{self.__airdrop}.")

        # Staging
        staged_disbursements = [(r, self.__calculate_disbursement(recipient=r))
                                for r in candidates]
        batches = list(
            staged_disbursements[index:index + self.BATCH_SIZE]
            for index in range(0, len(staged_disbursements), self.BATCH_SIZE))
        total_batches = len(batches)

        self.log.info("====== Staged Airdrop ======")
        for recipient, disbursement in staged_disbursements:
            self.log.info(f"{recipient.address} ... {str(disbursement)[:-18]}")
        self.log.info("==========================")

        # Staging Delay
        self.log.info(
            f"Airdrop will commence in {self.STAGING_DELAY} seconds...")
        if self.STAGING_DELAY > 3:
            time.sleep(self.STAGING_DELAY - 3)
        for i in range(3):
            time.sleep(1)
            self.log.info(f"NU Token airdrop starting in {3 - i} seconds...")

        # Slowly, in series...
        for batch, staged_disbursement in enumerate(batches, start=1):
            self.log.info(f"======= Batch #{batch} ========")

            for recipient, disbursement in staged_disbursement:

                # Perform the transfer... leaky faucet.
                self.__transfer(disbursement=disbursement,
                                recipient_address=recipient.address)
                self.__distributed += disbursement

                # Update the database record
                recipient.last_disbursement_amount = str(disbursement)
                recipient.total_received = str(
                    int(recipient.total_received) + disbursement)
                recipient.last_disbursement_time = datetime.now()

                self.db.session.add(recipient)
                self.db.session.commit()

            # end inner loop
            self.log.info(
                f"Completed Airdrop #{self.__airdrop} Batch #{batch} of {total_batches}."
            )

        # end outer loop
        now = maya.now()
        next_interval_slang = now.add(
            seconds=self.DISTRIBUTION_INTERVAL).slang_time()
        self.log.info(
            f"Completed Airdrop #{self.__airdrop}; Next airdrop is {next_interval_slang}."
        )

        del self._AIRDROP_QUEUE[self.__airdrop]
        self.__airdrop += 1
# Beispiel #10
# 0
class NodeConfiguration(ABC):
    """
    'Sideways Engagement' of Character classes; a reflection of input parameters.

    Abstract base for per-character configuration objects: holds defaults,
    serializers, and filesystem conventions, and produces configured
    character instances via ``produce()``.
    """

    # Abstract — concrete subclasses must override these.
    _NAME = NotImplemented
    _CHARACTER_CLASS = NotImplemented
    CONFIG_FILENAME = NotImplemented
    DEFAULT_CONFIG_FILE_LOCATION = NotImplemented

    # Mode
    DEFAULT_OPERATING_MODE = 'decentralized'

    # Domains
    DEFAULT_DOMAIN = 'goerli'

    # Serializers — node metadata round-trips through hex encoding.
    NODE_SERIALIZER = binascii.hexlify
    NODE_DESERIALIZER = binascii.unhexlify

    # System
    __CONFIG_FILE_EXT = '.config'
    __CONFIG_FILE_DESERIALIZER = json.loads
    TEMP_CONFIGURATION_DIR_PREFIX = "nucypher-tmp-"

    # Blockchain
    DEFAULT_PROVIDER_URI = 'http://localhost:8545'

    # Registry — bundled registry file shipped alongside the package.
    __REGISTRY_NAME = 'contract_registry.json'
    REGISTRY_SOURCE = os.path.join(BASE_DIR, __REGISTRY_NAME)

    # Rest + TLS
    DEFAULT_REST_HOST = '127.0.0.1'
    DEFAULT_REST_PORT = 9151
    DEFAULT_DEVELOPMENT_REST_PORT = 10151

    DEFAULT_CONTROLLER_PORT = NotImplemented

    __DEFAULT_TLS_CURVE = ec.SECP384R1
    __DEFAULT_NETWORK_MIDDLEWARE_CLASS = RestMiddleware

    class ConfigurationError(RuntimeError):
        """Base error for all configuration problems."""
        pass

    class InvalidConfiguration(ConfigurationError):
        """The configuration exists but is malformed or incomplete."""
        pass

    class NoConfigurationRoot(InvalidConfiguration):
        """The configuration root directory is missing."""
        pass

    def __init__(self,

                 # Base
                 config_root: str = None,
                 config_file_location: str = None,

                 # Mode
                 dev_mode: bool = False,
                 federated_only: bool = False,

                 # Identity
                 is_me: bool = True,
                 checksum_address: str = None,
                 crypto_power: CryptoPower = None,

                 # Keyring
                 keyring: NucypherKeyring = None,
                 keyring_dir: str = None,

                 # Learner
                 learn_on_same_thread: bool = False,
                 abort_on_learning_error: bool = False,
                 start_learning_now: bool = True,

                 # REST
                 rest_host: str = None,
                 rest_port: int = None,
                 controller_port: int = None,

                 # TLS
                 tls_curve: EllipticCurve = None,
                 certificate: Certificate = None,

                 # Network
                 domains: Set[str] = None,
                 interface_signature: Signature = None,
                 network_middleware: RestMiddleware = None,

                 # Node Storage
                 known_nodes: set = None,
                 node_storage: NodeStorage = None,
                 reload_metadata: bool = True,
                 save_metadata: bool = True,

                 # Blockchain
                 poa: bool = False,
                 provider_uri: str = None,
                 provider_process = None,

                 # Registry
                 registry_source: str = None,
                 registry_filepath: str = None,
                 download_registry: bool = True

                 ) -> None:
        """Assemble a node configuration from explicit parameters and class defaults.

        Unspecified values fall back to class-level DEFAULT_* constants or to
        sentinel constants (UNINITIALIZED_CONFIGURATION, NO_KEYRING_ATTACHED,
        NO_BLOCKCHAIN_CONNECTION) until ``initialize()`` is run.  In
        ``dev_mode`` the configuration is ephemeral and auto-initializes with
        a one-time random password.
        """

        # Logs
        self.log = Logger(self.__class__.__name__)

        #
        # REST + TLS + Web
        #
        self.controller_port = controller_port or self.DEFAULT_CONTROLLER_PORT
        self.rest_host = rest_host or self.DEFAULT_REST_HOST
        default_port = (self.DEFAULT_DEVELOPMENT_REST_PORT if dev_mode else self.DEFAULT_REST_PORT)
        self.rest_port = rest_port or default_port
        self.tls_curve = tls_curve or self.__DEFAULT_TLS_CURVE
        self.certificate = certificate

        self.interface_signature = interface_signature
        self.crypto_power = crypto_power

        #
        # Keyring
        #
        self.keyring = keyring or NO_KEYRING_ATTACHED
        self.keyring_dir = keyring_dir or UNINITIALIZED_CONFIGURATION

        # Contract Registry
        self.download_registry = download_registry
        self.__registry_source = registry_source or self.REGISTRY_SOURCE
        self.registry_filepath = registry_filepath or UNINITIALIZED_CONFIGURATION

        #
        # Configuration
        #
        self.config_file_location = config_file_location or UNINITIALIZED_CONFIGURATION
        self.config_root = UNINITIALIZED_CONFIGURATION

        #
        # Mode
        #
        self.federated_only = federated_only
        self.__dev_mode = dev_mode

        if self.__dev_mode:
            # Dev configurations are ephemeral: in-memory node storage and a
            # temporary directory (assigned later, in initialize()).
            self.__temp_dir = UNINITIALIZED_CONFIGURATION
            self.node_storage = ForgetfulNodeStorage(federated_only=federated_only, character_class=self.__class__)
        else:
            self.__temp_dir = LIVE_CONFIGURATION
            self.config_root = config_root or DEFAULT_CONFIG_ROOT
            self._cache_runtime_filepaths()
            self.node_storage = node_storage or LocalFileBasedNodeStorage(federated_only=federated_only,
                                                                          config_root=self.config_root)

        # Domains
        self.domains = domains or {self.DEFAULT_DOMAIN}

        #
        # Identity
        #
        self.is_me = is_me
        self.checksum_address = checksum_address

        if self.is_me is True or dev_mode is True:
            # Self
            if self.checksum_address and dev_mode is False:
                self.attach_keyring()
            self.network_middleware = network_middleware or self.__DEFAULT_NETWORK_MIDDLEWARE_CLASS()

        else:
            # Stranger: a reflection of someone else's node; no local state.
            self.node_storage = STRANGER_CONFIGURATION
            self.keyring_dir = STRANGER_CONFIGURATION
            self.keyring = STRANGER_CONFIGURATION
            self.network_middleware = STRANGER_CONFIGURATION
            if network_middleware:
                raise self.ConfigurationError("Cannot configure a stranger to use network middleware.")

        #
        # Learner
        #
        self.learn_on_same_thread = learn_on_same_thread
        self.abort_on_learning_error = abort_on_learning_error
        self.start_learning_now = start_learning_now
        self.save_metadata = save_metadata
        self.reload_metadata = reload_metadata

        self.__fleet_state = FleetStateTracker()
        known_nodes = known_nodes or set()
        if known_nodes:
            # Seed the fleet state with externally supplied nodes.
            self.known_nodes._nodes.update({node.checksum_address: node for node in known_nodes})
            self.known_nodes.record_fleet_state()  # TODO: Does this call need to be here?

        #
        # Blockchain
        #
        self.poa = poa
        self.provider_uri = provider_uri or self.DEFAULT_PROVIDER_URI
        self.provider_process = provider_process or NO_BLOCKCHAIN_CONNECTION

        # Sentinels until connect_to_blockchain()/connect_to_contracts() run.
        self.blockchain = NO_BLOCKCHAIN_CONNECTION.bool_value(False)
        self.accounts = NO_BLOCKCHAIN_CONNECTION
        self.token_agent = NO_BLOCKCHAIN_CONNECTION
        self.miner_agent = NO_BLOCKCHAIN_CONNECTION
        self.policy_agent = NO_BLOCKCHAIN_CONNECTION

        #
        # Development Mode
        #

        if dev_mode:

            # Ephemeral dev settings
            self.abort_on_learning_error = True
            self.save_metadata = False
            self.reload_metadata = False

            # Generate one-time alphanumeric development password
            alphabet = string.ascii_letters + string.digits
            password = ''.join(secrets.choice(alphabet) for _ in range(32))

            # Auto-initialize
            self.initialize(password=password, download_registry=download_registry)

    def __call__(self, *args, **kwargs):
        """Calling a configuration produces a configured character instance."""
        return self.produce(*args, **kwargs)

    @classmethod
    def generate(cls, password: str, *args, **kwargs):
        """Shortcut: Hook-up a new initial installation and write configuration file to the disk"""
        # NOTE(review): positional *args would collide with the explicit
        # dev_mode/is_me keywords — confirm callers pass keywords only.
        node_config = cls(dev_mode=False, is_me=True, *args, **kwargs)
        node_config.__write(password=password)
        return node_config

    def __write(self, password: str):
        """Initialize installation files on disk, then persist the config file."""
        # Underscore-prefixed locals: return values are intentionally unused.
        _new_installation_path = self.initialize(password=password, download_registry=self.download_registry)
        _configuration_filepath = self.to_configuration_file(filepath=self.config_file_location)

    def cleanup(self) -> None:
        """Release runtime resources: dev-mode temp dir and any open blockchain connection."""
        if self.__dev_mode:
            self.__temp_dir.cleanup()
        if self.blockchain:
            self.blockchain.disconnect()

    @property
    def dev_mode(self):
        """True when this configuration is ephemeral (temp dir, forgetful storage)."""
        return self.__dev_mode

    @property
    def known_nodes(self):
        """The fleet state tracker, which doubles as the known-nodes registry."""
        return self.__fleet_state

    def connect_to_blockchain(self,
                              enode: str = None,
                              recompile_contracts: bool = False,
                              full_sync: bool = False) -> None:
        """Connect this configuration to the blockchain and read its accounts.

        :param enode: ETH seednode or bootnode enode address to add as a peer,
                      e.g. 'enode://<pubkey>@<host>:30303' (geth clients only)
        :param recompile_contracts: Recompile all contracts on connection.
        :param full_sync: Perform a full chain sync rather than a fast one.

        :return: None
        """
        # Blockchain connectivity is meaningless for a federated-only node.
        if self.federated_only:
            raise NodeConfiguration.ConfigurationError("Cannot connect to blockchain in federated mode")

        self.blockchain = Blockchain.connect(provider_uri=self.provider_uri,
                                             compile=recompile_contracts,
                                             poa=self.poa,
                                             fetch_registry=True,
                                             provider_process=self.provider_process,
                                             sync=full_sync)

        # Read Ethereum Node Keyring
        self.accounts = self.blockchain.interface.w3.eth.accounts

        # Add Ethereum Peer
        if enode:
            # Peer injection is only implemented for geth clients.
            if self.blockchain.interface.client_version == 'geth':
                self.blockchain.interface.w3.geth.admin.addPeer(enode)
            else:
                raise NotImplementedError

    def connect_to_contracts(self) -> None:
        """Initialize contract agency and set them on config"""
        # Requires connect_to_blockchain() to have been called first.
        self.token_agent = NucypherTokenAgent(blockchain=self.blockchain)
        self.miner_agent = MinerAgent(blockchain=self.blockchain)
        self.policy_agent = PolicyAgent(blockchain=self.blockchain)
        self.log.debug("Established connection to nucypher contracts")

    def read_known_nodes(self):
        """Load stored node metadata into the fleet state and return it."""
        stored_nodes = self.node_storage.all(federated_only=self.federated_only)
        by_address = {node.checksum_address: node for node in stored_nodes}
        self.known_nodes._nodes.update(by_address)
        self.known_nodes.record_fleet_state()
        return self.known_nodes

    def forget_nodes(self) -> None:
        """Erase all stored node metadata and certificates from node storage."""
        self.node_storage.clear()
        # FIX: log message previously read "node node" (duplicated word).
        message = "Removed all stored node metadata and certificates"
        self.log.debug(message)

    def destroy(self) -> None:
        """Parse a node configuration and remove all associated files from the filesystem"""
        # Destroys keyring material first, then the configuration file itself.
        # NOTE(review): directories (keyring_dir, config_root) are not removed.
        self.keyring.destroy()
        os.remove(self.config_file_location)

    def generate_parameters(self, **overrides) -> dict:
        """Merge static and dynamic payloads with overrides, dropping keys
        that the character's __init__ does not accept."""
        excluded = ('config_root', 'poa', 'provider_uri')
        merged = {**self.static_payload, **self.dynamic_payload, **overrides}
        return {key: value for key, value in merged.items() if key not in excluded}

    def produce(self, **overrides):
        """Initialize a new character instance and return it."""
        return self._CHARACTER_CLASS(**self.generate_parameters(**overrides))

    @staticmethod
    def _read_configuration_file(filepath: str) -> dict:
        """Read a configuration file from disk and deserialize its contents.

        A missing file propagates as FileNotFoundError.  (The original wrapped
        the read in ``except FileNotFoundError: raise`` — a no-op that also
        left ``payload`` potentially unbound; removed.)
        """
        with open(filepath, 'r') as file:
            raw_contents = file.read()
        return NodeConfiguration.__CONFIG_FILE_DESERIALIZER(raw_contents)

    @classmethod
    def get_configuration_payload(cls, filepath: str = None, **overrides) -> dict:
        """Read a configuration file, rehydrate sub-configurations, and merge overrides.

        Non-None values in ``overrides`` take precedence over file contents.
        Raises ConfigurationError if the checksum address is missing or invalid.
        """

        # Imported here, presumably to avoid a circular import — confirm.
        from nucypher.config.storages import NodeStorage
        node_storage_subclasses = {storage._name: storage for storage in NodeStorage.__subclasses__()}

        if filepath is None:
            filepath = cls.DEFAULT_CONFIG_FILE_LOCATION

        # Read from disk
        payload = cls._read_configuration_file(filepath=filepath)

        # Sanity check
        try:
            checksum_address = payload['checksum_address']
        except KeyError:
            raise cls.ConfigurationError(f"No checksum address specified in configuration file {filepath}")
        else:
            if not eth_utils.is_checksum_address(checksum_address):
                raise cls.ConfigurationError(f"Address: '{checksum_address}', specified in {filepath} is not a valid checksum address.")

        # Initialize NodeStorage subclass from file (sub-configuration)
        storage_payload = payload['node_storage']
        storage_type = storage_payload[NodeStorage._TYPE_LABEL]
        storage_class = node_storage_subclasses[storage_type]
        node_storage = storage_class.from_payload(payload=storage_payload,
                                                  federated_only=payload['federated_only'],
                                                  serializer=cls.NODE_SERIALIZER,
                                                  deserializer=cls.NODE_DESERIALIZER)

        domains = set(payload['domains'])
        payload.update(dict(node_storage=node_storage, domains=domains))

        # Filter out Nones from overrides to detect, well, overrides
        overrides = {k: v for k, v in overrides.items() if v is not None}

        payload = {**payload, **overrides}
        return payload

    @classmethod
    def from_configuration_file(cls,
                                filepath: str = None,
                                provider_process=None,
                                **overrides) -> 'NodeConfiguration':

        """Initialize a NodeConfiguration from a JSON file."""

        payload = cls.get_configuration_payload(filepath=filepath, **overrides)

        # Instantiate from merged params; provider_process is runtime-only
        # and therefore passed in rather than read from the file.
        node_configuration = cls(config_file_location=filepath,
                                 provider_process=provider_process,
                                 **payload)

        return node_configuration

    def to_configuration_file(self, filepath: str = None) -> str:
        """Write the static_payload to a JSON file and return its path."""
        if not filepath:
            filepath = os.path.join(self.config_root, self.CONFIG_FILENAME)

        if os.path.isfile(filepath):
            # Avoid overriding an existing default configuration: derive a
            # per-account filename instead.
            filepath = os.path.join(
                self.config_root,
                f'{self._NAME.lower()}-{self.checksum_address[:6]}{self.__CONFIG_FILE_EXT}')

        payload = self.static_payload
        payload.pop('is_me')

        # Save node connection data alongside the static values.
        payload['node_storage'] = self.node_storage.payload()
        payload['domains'] = list(self.domains)

        with open(filepath, 'w') as config_file:
            json.dump(payload, config_file, indent=4)
        return filepath

    def validate(self, config_root: str, no_registry=False) -> bool:
        """Verify the configuration root and all expected sub-paths exist."""
        if not os.path.exists(config_root):
            raise self.ConfigurationError(f'No configuration directory found at {config_root}.')

        # Sub-paths (registry may be intentionally absent, e.g. federated mode)
        filepaths = self.runtime_filepaths
        if no_registry:
            filepaths.pop('registry_filepath')

        for path in filepaths.values():
            if os.path.exists(path):
                continue
            message = 'Missing configuration file or directory: {}.'
            if 'registry' in path:
                message += ' Did you mean to pass --federated-only?'
            raise NodeConfiguration.InvalidConfiguration(message.format(path))
        return True

    @property
    def static_payload(self) -> dict:
        """Exported static configuration values for initializing Ursula"""
        payload = dict(
            config_root=self.config_root,

            # Identity
            is_me=self.is_me,
            federated_only=self.federated_only,
            checksum_address=self.checksum_address,
            keyring_dir=self.keyring_dir,

            # Behavior
            domains=self.domains,  # From Set
            provider_uri=self.provider_uri,
            learn_on_same_thread=self.learn_on_same_thread,
            abort_on_learning_error=self.abort_on_learning_error,
            start_learning_now=self.start_learning_now,
            save_metadata=self.save_metadata,
        )

        if not self.federated_only:
            # NOTE(review): provider_uri is already set above, so only 'poa'
            # is effectively added here — confirm the duplication is intended.
            payload.update(dict(provider_uri=self.provider_uri, poa=self.poa))

        return payload

    @property
    def dynamic_payload(self) -> dict:
        """Exported dynamic configuration values for initializing Ursula.

        NOTE(review): the original signature declared ``connect_to_blockchain``
        and ``**overrides`` parameters, but a property getter can never
        receive arguments — they were always their defaults and the override
        branch was dead code.  Both removed; behavior is unchanged.
        """

        # Optionally refresh known-node metadata from storage before export.
        if self.reload_metadata:
            known_nodes = self.node_storage.all(federated_only=self.federated_only)
            known_nodes = {node.checksum_address: node for node in known_nodes}
            self.known_nodes._nodes.update(known_nodes)
        self.known_nodes.record_fleet_state()

        payload = dict(network_middleware=self.network_middleware or self.__DEFAULT_NETWORK_MIDDLEWARE_CLASS(),
                       known_nodes=self.known_nodes,
                       node_storage=self.node_storage,
                       crypto_power_ups=self.derive_node_power_ups() or None)

        if not self.federated_only:
            self.connect_to_blockchain(recompile_contracts=False)
            payload.update(blockchain=self.blockchain)

        return payload

    @property
    def runtime_filepaths(self):
        """Mapping of configuration field names to their current filesystem paths."""
        filepaths = dict(config_root=self.config_root,
                         keyring_dir=self.keyring_dir,
                         registry_filepath=self.registry_filepath)
        return filepaths

    @classmethod
    def generate_runtime_filepaths(cls, config_root: str) -> dict:
        """Dynamically generate paths based on configuration root directory"""
        # CONFIG_FILENAME comes from the concrete subclass; the registry name
        # is shared across all configurations.
        filepaths = dict(config_root=config_root,
                         config_file_location=os.path.join(config_root, cls.CONFIG_FILENAME),
                         keyring_dir=os.path.join(config_root, 'keyring'),
                         registry_filepath=os.path.join(config_root, NodeConfiguration.__REGISTRY_NAME))
        return filepaths

    def _cache_runtime_filepaths(self) -> None:
        """Generate runtime filepaths and cache any still-uninitialized ones."""
        generated = self.generate_runtime_filepaths(config_root=self.config_root)
        for field, filepath in generated.items():
            # Only fill in fields that were not explicitly configured.
            if getattr(self, field) is UNINITIALIZED_CONFIGURATION:
                setattr(self, field, filepath)

    def derive_node_power_ups(self) -> List[CryptoPowerUp]:
        """Derive the character's default crypto powers from the keyring.

        Returns an empty list for strangers and for dev-mode configurations.
        """
        if not self.is_me or self.dev_mode:
            return []
        return [self.keyring.derive_crypto_power(power_class)
                for power_class in self._CHARACTER_CLASS._default_crypto_powerups]

    def initialize(self, password: str, download_registry: bool = True) -> str:
        """Initialize a new configuration and write installation files to disk.

        :param password: Encryption password for the new keyring.
        :param download_registry: Fetch the latest published contract registry.
        :return: The configuration root path.
        """

        #
        # Create Base System Filepaths
        #

        if self.__dev_mode:
            # Dev mode lives in a throwaway temporary directory.
            self.__temp_dir = TemporaryDirectory(prefix=self.TEMP_CONFIGURATION_DIR_PREFIX)
            self.config_root = self.__temp_dir.name
        else:

            # Production Configuration
            try:
                os.mkdir(self.config_root, mode=0o755)

            except FileExistsError:
                # An existing, non-empty root is tolerated but logged.
                if os.listdir(self.config_root):
                    message = "There are existing files located at {}".format(self.config_root)
                    self.log.debug(message)

            except FileNotFoundError:
                # Parent directories are missing; create the full path.
                os.makedirs(self.config_root, mode=0o755)

        # Generate Installation Subdirectories
        self._cache_runtime_filepaths()

        #
        # Node Storage
        #

        self.node_storage.initialize()

        #
        # Keyring
        #

        if not self.dev_mode:
            if not os.path.isdir(self.keyring_dir):
                os.mkdir(self.keyring_dir, mode=0o700)  # TODO: Keyring backend entry point - COS
            self.write_keyring(password=password)

        #
        # Registry
        #

        if download_registry and not self.federated_only:
            self.registry_filepath = EthereumContractRegistry.download_latest_publication()

        #
        # Verify
        #

        if not self.__dev_mode:
            self.validate(config_root=self.config_root, no_registry=(not download_registry) or self.federated_only)

        #
        # Success
        #

        message = "Created nucypher installation files at {}".format(self.config_root)
        self.log.debug(message)

        return self.config_root

    def attach_keyring(self, checksum_address: str = None, *args, **kwargs) -> None:
        """Attach an existing on-disk keyring to this configuration.

        A no-op when the already-attached keyring matches the requested
        account; raises ConfigurationError on a mismatch or when no account
        can be determined.
        """
        if self.keyring is not NO_KEYRING_ATTACHED:
            if self.keyring.checksum_address != (checksum_address or self.checksum_address):
                raise self.ConfigurationError("There is already a keyring attached to this configuration.")
            return

        if (checksum_address or self.checksum_address) is None:
            raise self.ConfigurationError("No account specified to unlock keyring")

        self.keyring = NucypherKeyring(keyring_root=self.keyring_dir,  # type: str
                                       account=checksum_address or self.checksum_address,  # type: str
                                       *args, **kwargs)

    def write_keyring(self, password: str, wallet: bool = True, **generation_kwargs) -> NucypherKeyring:
        """Generate and attach a new keyring, resolving a wallet account first.

        :param password: Encryption password for the new keyring.
        :param wallet: When True, locate or create an Ethereum account to bind.
        :return: The newly generated NucypherKeyring.
        """

        checksum_address = None

        #
        # Decentralized
        #
        if wallet:

            # Note: It is assumed the blockchain is not yet available.
            if not self.federated_only and not self.checksum_address:

                # "Casual Geth"
                if self.provider_process:

                    if not os.path.exists(self.provider_process.data_dir):
                        os.mkdir(self.provider_process.data_dir)

                    # Get or create wallet address (geth etherbase)
                    checksum_address = self.provider_process.ensure_account_exists(password=password)

                # "Formal Geth" - Manual Web3 Provider, We assume is already running and available
                else:
                    self.connect_to_blockchain()
                    if not self.blockchain.interface.client.accounts:
                        raise self.ConfigurationError(f'Web3 provider "{self.provider_uri}" does not have any accounts')
                    checksum_address = self.blockchain.interface.client.etherbase

                # Addresses read from some node keyrings (clients) are *not* returned in checksum format.
                checksum_address = to_checksum_address(checksum_address)

            # Use explicit address
            elif self.checksum_address:
                checksum_address = self.checksum_address

        self.keyring = NucypherKeyring.generate(password=password,
                                                keyring_root=self.keyring_dir,
                                                checksum_address=checksum_address,
                                                **generation_kwargs)
        # Operating mode switch
        if self.federated_only or not wallet:
            self.checksum_address = self.keyring.federated_address
        else:
            self.checksum_address = self.keyring.account

        return self.keyring

    def write_registry(self,
                       output_filepath: str = None,
                       source: str = None,
                       force: bool = False,
                       blank=False) -> str:
        """Validate a source contract registry, or write a blank registry.

        :param output_filepath: Target path (defaults to the configured registry path).
        :param source: Source registry to validate (defaults to REGISTRY_SOURCE).
        :param force: Allow overwriting an existing file at the output path.
        :param blank: Write an empty registry file instead of validating the source.
        :return: The output filepath.
        """
        # Resolve defaults *before* touching the filesystem; the original
        # probed output_filepath while it could still be None.
        output_filepath = output_filepath or self.registry_filepath
        source = source or self.REGISTRY_SOURCE

        # FIX: refuse to clobber an existing registry unless explicitly
        # forced.  The original condition was inverted (``force and isfile``),
        # raising precisely when the caller asked to force.
        if os.path.isfile(output_filepath) and not force:
            raise self.ConfigurationError(
                'There is an existing file at the registry output_filepath {}'.format(output_filepath))

        if not blank and not self.dev_mode:
            # Validate Registry
            with open(source, 'r') as registry_file:
                try:
                    json.loads(registry_file.read())
                except JSONDecodeError:
                    message = "The registry source {} is not valid JSON".format(source)
                    self.log.critical(message)
                    raise self.ConfigurationError(message)
                else:
                    self.log.debug("Source registry {} is valid JSON".format(source))
            # NOTE(review): nothing is copied from ``source`` to
            # ``output_filepath`` on this path — confirm whether a copy
            # step is missing upstream.

        else:
            self.log.warn("Writing blank registry")
            open(output_filepath, 'w').close()  # write blank

        self.log.debug("Successfully wrote registry to {}".format(output_filepath))
        return output_filepath
Beispiel #11
0
class XSiteServerProtocol(WebSocketServerProtocol):
    """WebSocket server protocol that pushes application state to one client
    at a configurable tick rate, samples round-trip latency via periodic
    confirmation messages, and accepts commands from the client.
    """

    def __init__(self):
        super().__init__()
        self._state = ClientState.connected
        self._clientLibrary = None   # set from the client's hello message
        self._clientPlatform = None
        self._clientVersion = None
        self._tickInterval = 0.005   # seconds between state pushes
        self._latencies = []         # rolling window of latency samples (ms)
        self._id = 0                 # monotonically increasing state message id
        self._trackingId = None      # message id currently awaiting confirmation
        self._trackingOk = True      # last tracked message was confirmed
        self._onUpdate = None        # optional hook fired after each state push
        self.log = Logger()
        self.log.namespace = type(self).__name__

    def clientLibrary(self):
        """Client library name from the hello message (None before hello)."""
        return self._clientLibrary

    def clientPlatform(self):
        """Client platform from the hello message (None before hello)."""
        return self._clientPlatform

    def clientVersion(self):
        """Client version from the hello message (None before hello)."""
        return self._clientVersion

    def setOnUpdate(self, cb):
        """Register a zero-argument callback invoked after each state push."""
        self._onUpdate = cb

    def latency(self):
        """Mean of the recent latency samples in ms, or 0 with no samples."""
        from statistics import mean
        if len(self._latencies) > 0:
            return mean(self._latencies)
        else:
            return 0

    def tickRate(self):
        """Current state-push frequency in Hz."""
        return 1 / self._tickInterval

    def setTickRate(self, tickRate):
        """Set the state-push frequency in Hz."""
        self._tickInterval = 1 / tickRate

    def onOpen(self):
        self.factory.register(self)

    def onMessage(self, payload, isBinary):
        """Dispatch an incoming JSON message: hello, confirm, or command."""
        if not isBinary:
            msg = json.loads(payload.decode('utf8'))
            if msg['type'] == MessageType.hello.value:
                self._clientLibrary = msg['library']
                self._clientPlatform = msg['platform']
                self._clientVersion = msg['version']
                self._state = ClientState.listening
                self.log.info('Hello from ' + self._clientLibrary + ' on ' +
                              self._clientPlatform)
                self.sendState()
            elif msg['type'] == MessageType.confirm.value:
                if self._trackingId == msg['id']:
                    # Half the round trip, converted to milliseconds.
                    delta = datetime.datetime.now() - dateutil.parser.parse(
                        msg['timestamp'])
                    self._latencies.append(delta.microseconds / 2 / 1000)
                    # FIX: use len() rather than calling __len__ directly.
                    if len(self._latencies) > 20:
                        self._latencies.pop(0)
                    self._trackingOk = True
            elif msg['type'] == MessageType.command.value:
                command = msg['command']
                self.log.info('Command ' + command + ' from ' + self.peer)
                # NOTE(review): raises KeyError if 'argument' is absent —
                # confirm the protocol always includes the key.
                if msg['argument']:
                    argument = msg['argument']
                    self.factory.stateObject.consumeCommand(command, argument)
                else:
                    self.factory.stateObject.consumeCommand(command)
            else:
                print(msg)
                self.log.critical('Unknown type of message')

    def connectionLost(self, reason):
        WebSocketServerProtocol.connectionLost(self, reason)
        # FIX: a client may disconnect before ever sending hello, leaving
        # library/platform as None; concatenating None raised TypeError here.
        library = self.clientLibrary() or 'unknown'
        platform = self.clientPlatform() or 'unknown'
        self.log.info('Client ' + self.peer + "@" + library +
                      ' on ' + platform + " disconnected")
        self.factory.unregister(self)
        self._state = ClientState.disconnected

    def disconnectClient(self):
        """Politely close the connection with a normal-closure code."""
        self.sendClose(1000, 'please leave')

    def sendState(self):
        """Push one state message, then reschedule at the tick interval."""
        if self._state == ClientState.listening:
            state = self.factory.stateObject.getState()
            msg = StateMessage(self._id, state, self.latency(),
                               self.tickRate()).asJson()
            self._id = self._id + 1
            # Every 50th message is latency-tracked, but only once the
            # previous tracked message has been confirmed.
            if self._id % 50 == 0 and self._trackingOk:
                self._trackingId = self._id
                self._trackingOk = False
            self.sendMessage(msg.encode('utf8'))
            if self._onUpdate:
                self._onUpdate()

        if self._state != ClientState.disconnected:
            self.factory.reactor.callLater(self._tickInterval, self.sendState)
Beispiel #12
0
class SmsFactory(ClientFactory, Client):
    """Serial-modem SMS client: drives AT commands over a serial line and
    correlates modem responses with Deferred callbacks."""

    room = 'NA'
    # FIX: was the single string 'sendsms, readsms' — a tuple was clearly
    # intended (missing comma between the two quoted items).
    actions = ('sendsms', 'readsms')

    def __init__(self, event_fct=None):
        self.protocol = serialLineProtocol()
        self.uid = uuid.uuid4()
        self.protocol.factory = self
        self.log = Logger()
        self.first = True        # modem not yet initialized (ATZ pending)
        self.event = event_fct   # handler for unsolicited modem lines
        self.callback = None     # Deferred awaiting the current response
        self.wait = False        # True while a command response is pending
        # FIX: response lines are accumulated with .append(); a str has no
        # append, so this must be a list.
        self.response = []
        # Terminator lines that end a modem response.
        self.resp_re = re.compile(
            r'^OK|ERROR|(\+CM[ES] ERROR: \d+)|(COMMAND NOT SUPPORT)$')

    def receive(self, line):
        """Route one line from the modem: accumulate a pending response and
        fire its Deferred on a terminator, or hand unsolicited lines to the
        event handler."""
        if self.wait:
            self.response.append(line)
            if self.resp_re.match(line):
                self.wait = False
                # NOTE(review): '+CME/+CMS ERROR' terminators do not start
                # with 'ERROR' and are delivered as success — confirm intended.
                if line.startswith('ERROR'):
                    self.log.critical('error from Modem: %s' % line)
                    if self.callback:
                        # FIX: errback() requires a Failure/exception;
                        # the original passed the raw response list.
                        self.callback.errback(RuntimeError('\n'.join(self.response)))
                else:
                    if self.callback:
                        self.callback.callback(self.response)
                # FIX: reset to a fresh list (was reset to the string '').
                self.response = []
                self.callback = None
        elif self.event:
            self.event(line)
        else:
            self.log.debug('unmanaged message from Modem: %s' % line)

    def sendsms(self, recipient, message, callback_fct=None):
        """Send an SMS via chained AT commands:
        ATZ (first use only) -> AT+CMGF=1 -> AT+CMGS="recipient" -> body.

        Each stage arms a fresh Deferred that receive() fires when the modem
        answers; ``callback_fct`` receives the final response lines.
        """

        def recipient_set(res):
            # NOTE(review): ``res`` is the accumulated response list, so the
            # '>' comparison looks vestigial — confirm against real traffic.
            self.log.debug('do we have > ? ==> %s' %
                           ('OK' if res == '>' else 'No: ' + res))
            # FIX: was ``defer.Deferred`` (the class, never instantiated).
            self.callback = defer.Deferred()
            if callback_fct:
                self.callback.addCallback(callback_fct)
            self.wait = True
            self.protocol.send(message + b'\x1a')  # Ctrl-Z terminates the body

        def text_mode(res):
            self.callback = defer.Deferred()  # FIX: was missing the call
            self.callback.addCallback(recipient_set)
            self.wait = True
            self.protocol.send(b'AT+CMGS="' + recipient.encode() + b'"\r')

        def modem_init(res):
            self.first = False
            self.callback = defer.Deferred()  # FIX: was missing the call
            self.callback.addCallback(text_mode)
            self.wait = True
            self.protocol.send(b'AT+CMGF=1\r')

        if self.first:
            # Reset the modem once, then continue the chain from modem_init.
            self.wait = True
            self.callback = defer.Deferred()
            self.callback.addCallback(modem_init)
            self.protocol.send(b'ATZ\r')
        else:
            modem_init('OK')

    def _write(self, txt):
        """Encode and write raw text straight to the serial protocol."""
        self.protocol.send(txt.encode())
Beispiel #13
0
class PhotometerService(Service):
    """
    Twisted Service that listens to a TESS photometer over a serial port.

    Builds a protocol factory, opens the serial endpoint declared in
    ``options['endpoint']`` and funnels readings through a small circular
    buffer.  Also answers asynchronous ``getInfo()`` requests with the
    photometer's identification data (name, MAC, calibration ZP).
    """

    # Depth of the circular buffer holding incoming readings.
    BUFFER_SIZE = 1

    def __init__(self, options, label):

        self.options   = options
        self.label     = label
        self.namespace = self.label.upper()
        setLogLevel(namespace=self.namespace,  levelStr=options['log_messages'])
        setLogLevel(namespace=self.label,      levelStr=options['log_level'])
        self.log       = Logger(namespace=self.label)
        self.factory   = self.buildFactory()
        self.protocol  = None
        self.serport   = None
        self.buffer    = CircularBuffer(self.BUFFER_SIZE, self.log)
        self.counter   = 0
        # Handling of Asynchronous getInfo():
        # old-firmware photometers cannot report their own info, so it is
        # taken from the configuration instead of from a reading.
        self.info = None
        self.info_deferred = None
        if options['old_firmware']:
            self.info = {
                'name'  : self.options['name'],
                'mac'   : self.options['mac_address'],
                'calib' : self.options['zp'],
                'rev'   : 2,
                }

        # Serial port handling: only 'serial:<tty>:<baud>' endpoints are
        # supported; anything else is a configuration error.
        parts = chop(self.options['endpoint'], sep=':')
        if parts[0] != 'serial':
            self.log.critical("Incorrect endpoint type {ep}, should be 'serial'", ep=parts[0])
            raise NotImplementedError


    def startService(self):
        '''
        Starts the photometer service listens to a TESS
        Although it is technically a synchronous operation, it works well
        with inline callbacks
        '''
        self.log.info("starting {name}", name=self.name)
        self.connect()



    def stopService(self):
        """Close the serial connection and drop protocol references."""
        self.log.warn("stopping {name}", name=self.name)
        self.protocol.transport.loseConnection()
        self.protocol = None
        self.serport  = None
        #self.parent.childStopped(self)
        return defer.succeed(None)

    #---------------------
    # Extended Service API
    # --------------------

    @inlineCallbacks
    def reloadService(self, new_options):
        '''
        Reload configuration.
        Returns a Deferred
        '''
        # BUG FIX: the original read the still-unbound local 'options'
        # ('options = options[self.label]') instead of the 'new_options'
        # parameter, raising UnboundLocalError on every reload.
        options = new_options[self.label]
        setLogLevel(namespace=self.label,     levelStr=options['log_level'])
        setLogLevel(namespace=self.namespace, levelStr=options['log_messages'])
        self.options = options
        return defer.succeed(None)

    # -----------------------
    # Specific photometer API
    # -----------------------

    def handleInfo(self, reading):
        """Resolve a pending getInfo() Deferred from an incoming reading."""
        if self.info_deferred is not None:
            self.info = {
                'name'  : reading.get('name', None),
                'calib' : reading.get('ZP', None),
                'mac'   : self.options['mac_address'],
                'rev'   : 2,
            }
            self.log.info("Photometer Info: {info}", info=self.info)
            self.info_deferred.callback(self.info)
            self.info_deferred = None


    def curate(self, reading):
        '''Readings ready for MQTT Tx according to our wire protocol'''
        reading['seq'] = self.counter
        self.counter += 1
        self.last_tstamp = reading.pop('tstamp', None)
        if self.options['old_firmware']:
            # Old firmware does not report ZP/name: synthesize the missing
            # fields from configuration and compute magnitude ourselves.
            reading['mag']  = round(self.options['zp'] - 2.5*math.log10(reading['freq']),2)
            reading['rev']  = 2
            reading['name'] = self.options['name']
            reading['alt']  = 0.0
            reading['azi']  = 0.0
            reading['wdBm'] = 0
            reading.pop('zp', None)
        else:
            # New firmware reports its own ZP; cache identification info
            # and strip fields not part of the wire protocol.
            reading['mag']  = round(reading['ZP'] - 2.5*math.log10(reading['freq']),2)
            self.info = {
                'name'  : reading.get('name', None),
                'calib' : reading.get('ZP', None),
                'mac'   : self.options['mac_address'],
                'rev'   : 2,
            }
            reading.pop('udp', None)
            reading.pop('ain', None)
            reading.pop('ZP',  None)
        return reading


    def getInfo(self):
        '''Asynchronous operations'''
        if not self.options['old_firmware'] and self.info is None:
            # Info not yet seen in a reading: park a Deferred that
            # handleInfo() will fire; give up after 60 seconds.
            deferred = defer.Deferred()
            deferred.addTimeout(60, reactor)
            self.info_deferred = deferred
        else:
            self.log.info("Photometer Info: {info}", info=self.info)
            deferred = defer.succeed(self.info)
        return deferred

    # --------------
    # Helper methods
    # ---------------

    def connect(self):
        """Open the configured serial port and wire up the protocol."""
        parts = chop(self.options['endpoint'], sep=':')
        endpoint = parts[1:]  # [tty, baudrate]
        self.protocol = self.factory.buildProtocol(0)
        try:
            self.serport  = SerialPort(self.protocol, endpoint[0], reactor, baudrate=endpoint[1])
        except Exception as e:
            self.log.error("{excp}",excp=e)
            self.protocol = None
        else:
            self.gotProtocol(self.protocol)
            self.log.info("Using serial port {tty} @ {baud} bps", tty=endpoint[0], baud=endpoint[1])


    def buildFactory(self):
        """Return a TESS protocol factory for this service's namespace."""
        self.log.debug("Choosing a {model} factory", model=TESSW)
        import tessw.tessw
        factory = tessw.tessw.TESSProtocolFactory(self.namespace, self.options['old_firmware'])
        return factory


    def gotProtocol(self, protocol):
        """Register the connected protocol as the buffer's producer."""
        self.log.debug("got protocol")
        self.buffer.registerProducer(protocol, True)
        self.protocol  = protocol
Beispiel #14
0
class Crawler(Learner):
    """
    Obtain Blockchain information for Monitor and output to a DB.

    Periodically scrapes staking-contract details for every known node
    and writes them as InfluxDB line-protocol points, while the inherited
    Learner loop keeps node metadata fresh in a local SQLite store.
    """

    # Learner loop tuning.
    _SHORT_LEARNING_DELAY = .5
    _LONG_LEARNING_DELAY = 30
    LEARNING_TIMEOUT = 10
    _ROUNDS_WITHOUT_NODES_AFTER_WHICH_TO_SLOW_DOWN = 25

    DEFAULT_REFRESH_RATE = 60  # seconds

    # InfluxDB Line Protocol Format (note the spaces, commas):
    # +-----------+--------+-+---------+-+---------+
    # |measurement|,tag_set| |field_set| |timestamp|
    # +-----------+--------+-+---------+-+---------+
    BLOCKCHAIN_DB_MEASUREMENT = 'moe_network_info'   # TODO: should change name but then our historical data is gone
    BLOCKCHAIN_DB_LINE_PROTOCOL = '{measurement},staker_address={staker_address} ' \
                                      'worker_address="{worker_address}",' \
                                      'start_date={start_date},' \
                                      'end_date={end_date},' \
                                      'stake={stake},' \
                                      'locked_stake={locked_stake},' \
                                      'current_period={current_period}i,' \
                                      'last_confirmed_period={last_confirmed_period}i ' \
                                  '{timestamp}'
    BLOCKCHAIN_DB_NAME = 'network'

    BLOCKCHAIN_DB_RETENTION_POLICY_NAME = 'network_info_retention'
    BLOCKCHAIN_DB_RETENTION_POLICY_PERIOD = '5w'  # 5 weeks of data
    BLOCKCHAIN_DB_RETENTION_POLICY_REPLICATION = '1'

    def __init__(self,
                 registry,
                 blockchain_db_host: str,
                 blockchain_db_port: int,
                 node_storage_filepath: str = CrawlerNodeStorage.DEFAULT_DB_FILEPATH,
                 refresh_rate=DEFAULT_REFRESH_RATE,
                 restart_on_error=True,
                 *args, **kwargs):

        self.registry = registry
        self.federated_only = False
        node_storage = CrawlerNodeStorage(storage_filepath=node_storage_filepath)

        # Hook fleet-state changes so every new state is persisted to the
        # node storage DB as well as tracked in memory.
        class MonitoringTracker(FleetStateTracker):
            def record_fleet_state(self, *args, **kwargs):
                new_state_or_none = super().record_fleet_state(*args, **kwargs)
                if new_state_or_none:
                    _, new_state = new_state_or_none
                    node_storage.store_state_metadata(new_state)

        self.tracker_class = MonitoringTracker

        super().__init__(save_metadata=True, node_storage=node_storage, *args, **kwargs)
        self.log = Logger(self.__class__.__name__)
        self.log.info(f"Storing node metadata in DB: {node_storage.db_filepath}")
        self.log.info(f"Storing blockchain metadata in DB: {blockchain_db_host}:{blockchain_db_port}")

        self._refresh_rate = refresh_rate
        self._restart_on_error = restart_on_error

        # Agency
        self.staking_agent = ContractAgency.get_agent(StakingEscrowAgent, registry=self.registry)

        # Crawler Tasks
        self._nodes_contract_info_learning_task = task.LoopingCall(self._learn_about_nodes_contract_info)

        # InfluxDB connection is created lazily in start().
        self._db_host = blockchain_db_host
        self._db_port = blockchain_db_port
        self._blockchain_db_client = None

    def _ensure_blockchain_db_exists(self):
        """Create the InfluxDB database and retention policy if missing."""
        try:
            db_list = self._blockchain_db_client.get_list_database()
        except requests.exceptions.ConnectionError:
            raise ConnectionError(f"No connection to InfluxDB at {self._db_host}:{self._db_port}")
        found_db = (list(filter(lambda db: db['name'] == self.BLOCKCHAIN_DB_NAME, db_list)))
        if len(found_db) == 0:
            # db not previously created
            self.log.info(f'Database {self.BLOCKCHAIN_DB_NAME} not found, creating it')
            self._blockchain_db_client.create_database(self.BLOCKCHAIN_DB_NAME)
            # TODO: review defaults for retention policy
            self._blockchain_db_client.create_retention_policy(name=self.BLOCKCHAIN_DB_RETENTION_POLICY_NAME,
                                                               duration=self.BLOCKCHAIN_DB_RETENTION_POLICY_PERIOD,
                                                               replication=self.BLOCKCHAIN_DB_RETENTION_POLICY_REPLICATION,
                                                               database=self.BLOCKCHAIN_DB_NAME,
                                                               default=True)
        else:
            self.log.info(f'Database {self.BLOCKCHAIN_DB_NAME} already exists, no need to create it')

    def learn_from_teacher_node(self, *args, **kwargs):
        """Learn as usual, but also persist the current teacher's metadata."""
        try:
            current_teacher = self.current_teacher_node(cycle=False)
        except self.NotEnoughTeachers as e:
            self.log.warn("Can't learn right now: {}".format(e.args[0]))
            return

        new_nodes = super().learn_from_teacher_node(*args, **kwargs)

        # update metadata of teacher - not just in memory but in the underlying storage system (db in this case)
        self.node_storage.store_node_metadata(current_teacher)
        self.node_storage.store_current_teacher(current_teacher.checksum_address)

        return new_nodes

    def _learn_about_nodes_contract_info(self):
        """Collect staking details for every known node and write them to InfluxDB."""
        agent = self.staking_agent

        block_time = agent.blockchain.client.w3.eth.getBlock('latest').timestamp  # precision in seconds
        current_period = agent.get_current_period()

        nodes_dict = self.known_nodes.abridged_nodes_dict()
        self.log.info(f'Processing {len(nodes_dict)} nodes at '
                      f'{MayaDT(epoch=block_time)} | Period {current_period}')

        # PERF: token economics depend only on the registry, not on the
        # staker -- fetch once here instead of once per node (the original
        # re-fetched them inside the loop).
        economics = TokenEconomicsFactory.get_economics(registry=self.registry)

        data = []
        for staker_address in nodes_dict:
            worker = agent.get_worker_from_staker(staker_address)

            stake = agent.owned_tokens(staker_address)
            staked_nu_tokens = float(NU.from_nunits(stake).to_tokens())
            locked_nu_tokens = float(NU.from_nunits(agent.get_locked_tokens(
                staker_address=staker_address)).to_tokens())

            stakes = StakeList(checksum_address=staker_address, registry=self.registry)
            stakes.refresh()

            # store dates as floats for comparison purposes
            start_date = datetime_at_period(stakes.initial_period,
                                            seconds_per_period=economics.seconds_per_period).datetime().timestamp()
            end_date = datetime_at_period(stakes.terminal_period,
                                          seconds_per_period=economics.seconds_per_period).datetime().timestamp()

            last_confirmed_period = agent.get_last_active_period(staker_address)

            # TODO: do we need to worry about how much information is in memory if number of nodes is
            #  large i.e. should I check for size of data and write within loop if too big
            data.append(self.BLOCKCHAIN_DB_LINE_PROTOCOL.format(
                measurement=self.BLOCKCHAIN_DB_MEASUREMENT,
                staker_address=staker_address,
                worker_address=worker,
                start_date=start_date,
                end_date=end_date,
                stake=staked_nu_tokens,
                locked_stake=locked_nu_tokens,
                current_period=current_period,
                last_confirmed_period=last_confirmed_period,
                timestamp=block_time
            ))

        if not self._blockchain_db_client.write_points(data,
                                                       database=self.BLOCKCHAIN_DB_NAME,
                                                       time_precision='s',
                                                       batch_size=10000,
                                                       protocol='line'):
            # TODO: what do we do here
            self.log.warn(f'Unable to write to database {self.BLOCKCHAIN_DB_NAME} at '
                          f'{MayaDT(epoch=block_time)} | Period {current_period}')

    def _handle_errors(self, *args, **kwargs):
        """Errback for the looping task: log and optionally restart the crawler."""
        failure = args[0]
        # Braces would confuse the Logger's format-string handling.
        cleaned_traceback = failure.getTraceback().replace('{', '').replace('}', '')
        if self._restart_on_error:
            self.log.warn(f'Unhandled error: {cleaned_traceback}. Attempting to restart crawler')
            if not self._nodes_contract_info_learning_task.running:
                self.start()
        else:
            self.log.critical(f'Unhandled error: {cleaned_traceback}')

    def start(self):
        """Start the crawler if not already running"""
        if not self.is_running:
            self.log.info('Starting Monitor Crawler')
            if self._blockchain_db_client is None:
                self._blockchain_db_client = InfluxDBClient(host=self._db_host,
                                                            port=self._db_port,
                                                            database=self.BLOCKCHAIN_DB_NAME)
                self._ensure_blockchain_db_exists()

            # start tasks
            node_learner_deferred = self._nodes_contract_info_learning_task.start(interval=self._refresh_rate,
                                                                                  now=False)

            # hookup error callbacks
            node_learner_deferred.addErrback(self._handle_errors)

            self.start_learning_loop(now=False)

    def stop(self):
        """Stop the crawler if currently running"""
        if self.is_running:
            self.log.info('Stopping Monitor Crawler')

            # stop tasks
            self._nodes_contract_info_learning_task.stop()

            if self._blockchain_db_client is not None:
                self._blockchain_db_client.close()
                self._blockchain_db_client = None

            # TODO: should I delete the NodeStorage to close the sqlite db connection here?

    @property
    def is_running(self):
        """Returns True if currently running, False otherwise"""
        return self._nodes_contract_info_learning_task.running
Beispiel #15
0
class Crawler(Learner):
    """
    Obtain Blockchain information for Monitor and output to a DB.
    """

    _SHORT_LEARNING_DELAY = 2
    _LONG_LEARNING_DELAY = 30
    _ROUNDS_WITHOUT_NODES_AFTER_WHICH_TO_SLOW_DOWN = 25

    LEARNING_TIMEOUT = 10
    DEFAULT_REFRESH_RATE = 60  # seconds

    # InfluxDB Line Protocol Format (note the spaces, commas):
    # +-----------+--------+-+---------+-+---------+
    # |measurement|,tag_set| |field_set| |timestamp|
    # +-----------+--------+-+---------+-+---------+
    NODE_MEASUREMENT = 'crawler_node_info'
    NODE_LINE_PROTOCOL = '{measurement},staker_address={staker_address} ' \
                         'worker_address="{worker_address}",' \
                         'start_date={start_date},' \
                         'end_date={end_date},' \
                         'stake={stake},' \
                         'locked_stake={locked_stake},' \
                         'current_period={current_period}i,' \
                         'last_confirmed_period={last_confirmed_period}i ' \
                         '{timestamp}'

    EVENT_MEASUREMENT = 'crawler_event_info'
    EVENT_LINE_PROTOCOL = '{measurement},txhash={txhash} ' \
                          'contract_name="{contract_name}",' \
                          'contract_address="{contract_address}",' \
                          'event_name="{event_name}",' \
                          'block_number={block_number}i,' \
                          'args="{args}" ' \
                          '{timestamp}'

    INFLUX_DB_NAME = 'network'
    INFLUX_RETENTION_POLICY_NAME = 'network_info_retention'

    # TODO: review defaults for retention policy
    RETENTION = '5w'  # Weeks
    REPLICATION = '1'

    METRICS_ENDPOINT = 'stats'
    DEFAULT_CRAWLER_HTTP_PORT = 9555

    ERROR_EVENTS = {
        StakingEscrowAgent: ['Slashed'],
        AdjudicatorAgent: ['IncorrectCFragVerdict'],
        PolicyManagerAgent: ['NodeBrokenState'],
    }

    def __init__(self,
                 influx_host: str,
                 influx_port: int,
                 crawler_http_port: int = DEFAULT_CRAWLER_HTTP_PORT,
                 registry: BaseContractRegistry = None,
                 node_storage_filepath: str = CrawlerNodeStorage.
                 DEFAULT_DB_FILEPATH,
                 refresh_rate=DEFAULT_REFRESH_RATE,
                 restart_on_error=True,
                 *args,
                 **kwargs):

        # Settings
        self.federated_only = False  # Nope - for compatibility with Learner TODO # nucypher/466
        Teacher.set_federated_mode(False)

        self.registry = registry or InMemoryContractRegistry.from_latest_publication(
        )
        self._refresh_rate = refresh_rate
        self._restart_on_error = restart_on_error

        # TODO: Needs cleanup
        # Tracking
        node_storage = CrawlerNodeStorage(
            storage_filepath=node_storage_filepath)

        class MonitoringTracker(FleetStateTracker):
            def record_fleet_state(self, *args, **kwargs):
                new_state_or_none = super().record_fleet_state(*args, **kwargs)
                if new_state_or_none:
                    _, new_state = new_state_or_none
                    state = self.abridged_state_details(new_state)
                    node_storage.store_state_metadata(state)

        self.tracker_class = MonitoringTracker

        super().__init__(save_metadata=True,
                         node_storage=node_storage,
                         *args,
                         **kwargs)
        self.log = Logger(self.__class__.__name__)
        self.log.info(
            f"Storing node metadata in DB: {node_storage.db_filepath}")
        self.log.info(
            f"Storing blockchain metadata in DB: {influx_host}:{influx_port}")

        # In-memory Metrics
        self._stats = {'status': 'initializing'}
        self._crawler_client = None

        # Initialize InfluxDB
        self._db_host = influx_host
        self._db_port = influx_port
        self._influx_client = None

        # Agency
        self.staking_agent = ContractAgency.get_agent(StakingEscrowAgent,
                                                      registry=self.registry)

        # Crawler Tasks
        self.__collection_round = 0
        self.__collecting_nodes = False  # thread tracking
        self.__collecting_stats = False
        self.__events_from_block = 0  # from the beginning
        self.__collecting_events = False

        self._node_details_task = task.LoopingCall(self._learn_about_nodes)
        self._stats_collection_task = task.LoopingCall(self._collect_stats,
                                                       threaded=True)
        self._events_collection_task = task.LoopingCall(self._collect_events)

        # JSON Endpoint
        self._crawler_http_port = crawler_http_port
        self._flask = None

    def _initialize_influx(self):
        try:
            db_list = self._influx_client.get_list_database()
        except requests.exceptions.ConnectionError:
            raise ConnectionError(
                f"No connection to InfluxDB at {self._db_host}:{self._db_port}"
            )
        found_db = (list(
            filter(lambda db: db['name'] == self.INFLUX_DB_NAME, db_list)))
        if len(found_db) == 0:
            # db not previously created
            self.log.info(
                f'Database {self.INFLUX_DB_NAME} not found, creating it')
            self._influx_client.create_database(self.INFLUX_DB_NAME)
            self._influx_client.create_retention_policy(
                name=self.INFLUX_RETENTION_POLICY_NAME,
                duration=self.RETENTION,
                replication=self.REPLICATION,
                database=self.INFLUX_DB_NAME,
                default=True)
        else:
            self.log.info(
                f'Database {self.INFLUX_DB_NAME} already exists, no need to create it'
            )

    def learn_from_teacher_node(self, *args, **kwargs):
        try:
            current_teacher = self.current_teacher_node(cycle=False)
        except self.NotEnoughTeachers as e:
            self.log.warn("Can't learn right now: {}".format(e.args[0]))
            return

        new_nodes = super().learn_from_teacher_node(*args, **kwargs)

        # update metadata of teacher - not just in memory but in the underlying storage system (db in this case)
        self.node_storage.store_node_metadata(current_teacher)
        self.node_storage.store_current_teacher(
            current_teacher.checksum_address)

        return new_nodes

    #
    # Measurements
    #

    @property
    def stats(self) -> dict:
        return self._stats

    @collector(label="Projected Stake and Stakers")
    def _measure_future_locked_tokens(self, periods: int = 365):
        period_range = range(1, periods + 1)
        token_counter = dict()
        for day in period_range:
            tokens, stakers = self.staking_agent.get_all_active_stakers(
                periods=day)
            token_counter[day] = (float(NU.from_nunits(tokens).to_tokens()),
                                  len(stakers))
        return dict(token_counter)

    @collector(label="Top Stakes")
    def _measure_top_stakers(self) -> dict:
        _, stakers = self.staking_agent.get_all_active_stakers(periods=1)
        data = dict()
        for staker, stake in stakers:
            staker_address = to_checksum_address(staker)
            data[staker_address] = float(NU.from_nunits(stake).to_tokens())
        data = dict(sorted(data.items(), key=lambda s: s[1], reverse=True))
        return data

    @collector(label="Staker Confirmation Status")
    def _measure_staker_activity(self) -> dict:
        confirmed, pending, inactive = self.staking_agent.partition_stakers_by_activity(
        )
        stakers = dict()
        stakers['active'] = len(confirmed)
        stakers['pending'] = len(pending)
        stakers['inactive'] = len(inactive)
        return stakers

    @collector(label="Time Until Next Period")
    def _measure_time_remaining(self) -> str:
        current_period = self.staking_agent.get_current_period()
        economics = EconomicsFactory.get_economics(registry=self.registry)
        next_period = datetime_at_period(
            period=current_period + 1,
            seconds_per_period=economics.seconds_per_period)
        remaining = str(next_period - maya.now())
        return remaining

    @collector(label="Known Nodes")
    def measure_known_nodes(self):

        #
        # Setup
        #

        current_period = self.staking_agent.get_current_period()
        buckets = {
            -1: ('green', 'Confirmed'),  # Confirmed Next Period
            0: ('#e0b32d', 'Pending'),  # Pending Confirmation of Next Period
            current_period: ('#525ae3', 'Idle'),  # Never confirmed
            BlockchainInterface.NULL_ADDRESS:
            ('#d8d9da', 'Headless')  # Headless Staker (No Worker)
        }

        shortest_uptime, newborn = float('inf'), None
        longest_uptime, uptime_king = 0, None

        uptime_template = '{days}d:{hours}h:{minutes}m'

        #
        # Scrape
        #

        payload = defaultdict(list)
        known_nodes = self._crawler_client.get_known_nodes_metadata()
        for staker_address in known_nodes:

            #
            # Confirmation Status Scraping
            #

            last_confirmed_period = self.staking_agent.get_last_active_period(
                staker_address)
            missing_confirmations = current_period - last_confirmed_period
            worker = self.staking_agent.get_worker_from_staker(staker_address)
            if worker == BlockchainInterface.NULL_ADDRESS:
                # missing_confirmations = BlockchainInterface.NULL_ADDRESS
                continue  # TODO: Skip this DetachedWorker and do not display it
            try:
                color, status_message = buckets[missing_confirmations]
            except KeyError:
                color, status_message = 'red', f'Unconfirmed'
            node_status = {
                'status': status_message,
                'missed_confirmations': missing_confirmations,
                'color': color
            }

            #
            # Uptime Scraping
            #

            now = maya.now()
            timestamp = maya.MayaDT.from_iso8601(
                known_nodes[staker_address]['timestamp'])
            delta = now - timestamp

            node_qualifies_as_newborn = (
                delta.total_seconds() <
                shortest_uptime) and missing_confirmations == -1
            node_qualifies_for_uptime_king = (
                delta.total_seconds() >
                longest_uptime) and missing_confirmations == -1
            if node_qualifies_as_newborn:
                shortest_uptime, newborn = delta.total_seconds(
                ), staker_address
            elif node_qualifies_for_uptime_king:
                longest_uptime, uptime_king = delta.total_seconds(
                ), staker_address

            hours = delta.seconds // 3600
            minutes = delta.seconds % 3600 // 60
            natural_uptime = uptime_template.format(days=delta.days,
                                                    hours=hours,
                                                    minutes=minutes)

            #
            # Aggregate
            #

            known_nodes[staker_address]['status'] = node_status
            known_nodes[staker_address]['uptime'] = natural_uptime
            payload[status_message.lower()].append(known_nodes[staker_address])

        # There are not always winners...
        if newborn:
            known_nodes[newborn]['newborn'] = True
        if uptime_king:
            known_nodes[uptime_king]['uptime_king'] = True
        return payload

    def _collect_stats(self, threaded: bool = True) -> None:
        # TODO: Handle faulty connection to provider (requests.exceptions.ReadTimeout)
        if threaded:
            if self.__collecting_stats:
                self.log.debug(
                    "Skipping Round - Metrics collection thread is already running"
                )
                return
            return reactor.callInThread(self._collect_stats, threaded=False)
        self.__collection_round += 1
        self.__collecting_stats = True

        start = maya.now()
        click.secho(
            f"Scraping Round #{self.__collection_round} ========================",
            color='blue')
        self.log.info("Collecting Statistics...")

        #
        # Read
        #

        # Time
        block_time = self.staking_agent.blockchain.client.w3.eth.getBlock(
            'latest').timestamp  # epoch
        current_period = self.staking_agent.get_current_period()
        click.secho("✓ ... Current Period", color='blue')
        time_remaining = self._measure_time_remaining()

        # Nodes
        teacher = self._crawler_client.get_current_teacher_checksum()
        states = self._crawler_client.get_previous_states_metadata()

        known_nodes = self.measure_known_nodes()

        activity = self._measure_staker_activity()

        # Stake
        future_locked_tokens = self._measure_future_locked_tokens()
        global_locked_tokens = self.staking_agent.get_global_locked_tokens()
        click.secho("✓ ... Global Network Locked Tokens", color='blue')

        top_stakers = self._measure_top_stakers()

        #
        # Write
        #

        self._stats = {
            'blocktime': block_time,
            'current_period': current_period,
            'next_period': time_remaining,
            'prev_states': states,
            'current_teacher': teacher,
            'known_nodes': len(self.known_nodes),
            'activity': activity,
            'node_details': known_nodes,
            'global_locked_tokens': global_locked_tokens,
            'future_locked_tokens': future_locked_tokens,
            'top_stakers': top_stakers,
        }
        done = maya.now()
        delta = done - start
        self.__collecting_stats = False
        click.echo(
            f"Scraping round completed (duration {delta}).",
            color='yellow')  # TODO: Make optional, use emitter, or remove
        click.echo("==========================================")
        self.log.debug(f"Collected new metrics took {delta}.")

    @collector(label="Network Event Details")
    def _collect_events(self, threaded: bool = True):
        if threaded:
            if self.__collecting_events:
                self.log.debug(
                    "Skipping Round - Events collection thread is already running"
                )
                return
            return reactor.callInThread(self._collect_events, threaded=False)
        self.__collecting_events = True

        blockchain_client = self.staking_agent.blockchain.client
        latest_block = blockchain_client.w3.eth.getBlock('latest')
        from_block = self.__events_from_block

        block_time = latest_block.timestamp  # precision in seconds
        current_period = self.staking_agent.get_current_period()

        events_list = list()
        for agent_class, event_names in self.ERROR_EVENTS.items():
            agent = ContractAgency.get_agent(agent_class,
                                             registry=self.registry)
            for event_name in event_names:
                events = [agent.contract.events[event_name]]
                for event in events:
                    event_filter = event.createFilter(
                        fromBlock=from_block, toBlock=latest_block.number)
                    entries = event_filter.get_all_entries()
                    for event_record in entries:
                        record = EventRecord(event_record)
                        args = ", ".join(f"{k}:{v}"
                                         for k, v in record.args.items())
                        events_list.append(
                            self.EVENT_LINE_PROTOCOL.format(
                                measurement=self.EVENT_MEASUREMENT,
                                txhash=record.transaction_hash,
                                contract_name=agent.contract_name,
                                contract_address=agent.contract_address,
                                event_name=event_name,
                                block_number=record.block_number,
                                args=args,
                                timestamp=blockchain_client.w3.eth.getBlock(
                                    record.block_number).timestamp,
                            ))

        success = self._influx_client.write_points(
            events_list,
            database=self.INFLUX_DB_NAME,
            time_precision='s',
            batch_size=10000,
            protocol='line')
        self.__events_from_block = latest_block.number
        self.__collecting_events = False
        if not success:
            # TODO: What do we do here - Event hook for alerting?
            self.log.warn(
                f'Unable to write events to database {self.INFLUX_DB_NAME} at '
                f'{MayaDT(epoch=block_time)} | Period {current_period} starting from block {from_block}'
            )

    @collector(label="Known Node Details")
    def _learn_about_nodes(self, threaded: bool = True):
        """Collect staking details for every known node and write them to InfluxDB.

        When ``threaded`` is True, the work is re-dispatched to the reactor
        thread pool and this call returns immediately; the
        ``__collecting_nodes`` flag prevents overlapping collection rounds.
        """
        if threaded:
            if self.__collecting_nodes:
                self.log.debug(
                    "Skipping Round - Nodes collection thread is already running"
                )
                return
            return reactor.callInThread(self._learn_about_nodes,
                                        threaded=False)
        self.__collecting_nodes = True

        try:
            agent = self.staking_agent
            known_nodes = list(self.known_nodes)

            block_time = agent.blockchain.client.w3.eth.getBlock(
                'latest').timestamp  # precision in seconds
            current_period = agent.get_current_period()

            log = f'Processing {len(known_nodes)} nodes at {MayaDT(epoch=block_time)} | Period {current_period}'
            self.log.info(log)

            # Economics parameters are network-wide, not per-node: fetch them
            # once here instead of once per node inside the loop (the original
            # re-queried the registry on every iteration).
            economics = EconomicsFactory.get_economics(registry=self.registry)

            data = list()
            for node in known_nodes:

                staker_address = node.checksum_address
                worker = agent.get_worker_from_staker(staker_address)

                stake = agent.owned_tokens(staker_address)
                staked_nu_tokens = float(NU.from_nunits(stake).to_tokens())
                locked_nu_tokens = float(
                    NU.from_nunits(
                        agent.get_locked_tokens(
                            staker_address=staker_address)).to_tokens())

                stakes = StakeList(checksum_address=staker_address,
                                   registry=self.registry)
                stakes.refresh()

                if stakes.initial_period is NOT_STAKING:
                    continue  # TODO: Skip this measurement for now

                start_date = datetime_at_period(
                    stakes.initial_period,
                    seconds_per_period=economics.seconds_per_period)
                start_date = start_date.datetime().timestamp()
                end_date = datetime_at_period(
                    stakes.terminal_period,
                    seconds_per_period=economics.seconds_per_period)
                end_date = end_date.datetime().timestamp()

                last_confirmed_period = agent.get_last_active_period(
                    staker_address)

                num_work_orders = 0  # len(node.work_orders())  # TODO: Only works for is_me with datastore attached

                # TODO: do we need to worry about how much information is in memory if number of nodes is
                #  large i.e. should I check for size of data and write within loop if too big
                data.append(
                    self.NODE_LINE_PROTOCOL.format(
                        measurement=self.NODE_MEASUREMENT,
                        staker_address=staker_address,
                        worker_address=worker,
                        start_date=start_date,
                        end_date=end_date,
                        stake=staked_nu_tokens,
                        locked_stake=locked_nu_tokens,
                        current_period=current_period,
                        last_confirmed_period=last_confirmed_period,
                        timestamp=block_time,
                        work_orders=num_work_orders))

            success = self._influx_client.write_points(
                data,
                database=self.INFLUX_DB_NAME,
                time_precision='s',
                batch_size=10000,
                protocol='line')
            if not success:
                # TODO: What do we do here - Event hook for alerting?
                self.log.warn(
                    f'Unable to write node information to database {self.INFLUX_DB_NAME} at '
                    f'{MayaDT(epoch=block_time)} | Period {current_period}')
        finally:
            # Always clear the in-progress flag. The original cleared it only
            # on success, so a single exception would leave it set and every
            # future collection round would be skipped silently.
            self.__collecting_nodes = False

    def make_flask_server(self):
        """JSON Endpoint"""
        app = Flask('nucypher-monitor')
        self._flask = app
        app.config["JSONIFY_PRETTYPRINT_REGULAR"] = True

        @app.route('/stats', methods=['GET'])
        def stats():
            # Serve the most recently collected statistics snapshot.
            return jsonify(self._stats)

    def _handle_errors(self, *args, **kwargs):
        """Errback for the crawler's looping tasks: log, and optionally restart."""
        failure = args[0]
        # Strip curly braces so the traceback cannot interfere with any
        # downstream brace-based log formatting.
        cleaned_traceback = failure.getTraceback().replace('{', '').replace(
            '}', '')
        if not self._restart_on_error:
            self.log.critical(f'Unhandled error: {cleaned_traceback}')
            return
        self.log.warn(
            f'Unhandled error: {cleaned_traceback}. Attempting to restart crawler'
        )
        if not self._node_details_task.running:
            self.start()

    def start(self, eager: bool = False):
        """Start the crawler if not already running.

        Initializes the InfluxDB and crawler storage clients on first use,
        starts the three looping tasks, then blocks serving the Flask app.

        :param eager: If True, each looping task runs once immediately.
        """
        if not self.is_running:
            self.log.info('Starting Crawler...')
            # Lazily construct the InfluxDB client so repeated stop()/start()
            # cycles get a fresh connection (stop() sets it back to None).
            if self._influx_client is None:
                self._influx_client = InfluxDBClient(
                    host=self._db_host,
                    port=self._db_port,
                    database=self.INFLUX_DB_NAME)
                self._initialize_influx()

            if self._crawler_client is None:
                from monitor.db import CrawlerStorageClient
                self._crawler_client = CrawlerStorageClient()

                # TODO: Maybe?
                # from monitor.db import CrawlerInfluxClient
                # self.crawler_influx_client = CrawlerInfluxClient()

            # start tasks
            node_learner_deferred = self._node_details_task.start(
                interval=self._refresh_rate, now=eager)
            collection_deferred = self._stats_collection_task.start(
                interval=self._refresh_rate, now=eager)

            # get known last event block (resume event collection where the
            # previous run left off rather than re-scanning from genesis)
            self.__events_from_block = self._get_last_known_blocknumber()
            events_deferred = self._events_collection_task.start(
                interval=self._refresh_rate, now=eager)

            # hookup error callbacks
            node_learner_deferred.addErrback(self._handle_errors)
            collection_deferred.addErrback(self._handle_errors)
            events_deferred.addErrback(self._handle_errors)

            # Start up
            self.start_learning_loop(now=False)
            self.make_flask_server()
            # NOTE: HendrixDeploy.run() starts the reactor and blocks this
            # thread; nothing after this call executes until shutdown.
            hx_deployer = HendrixDeploy(action="start",
                                        options={
                                            "wsgi": self._flask,
                                            "http_port":
                                            self._crawler_http_port
                                        })
            hx_deployer.run()  # <--- Blocking Call to Reactor

    def stop(self):
        """Stop the crawler if currently running"""
        if not self.is_running:
            return
        self.log.info('Stopping Monitor Crawler')

        # Halt every looping task before tearing down the database client.
        for looping_task in (self._node_details_task,
                             self._events_collection_task,
                             self._stats_collection_task):
            looping_task.stop()

        if self._influx_client is not None:
            self._influx_client.close()
            self._influx_client = None

    @property
    def is_running(self):
        """Returns True if currently running, False otherwise"""
        # The node-details task is started and stopped together with the other
        # loops (see start()/stop()), so its state stands in for the whole crawler.
        return self._node_details_task.running

    def _get_last_known_blocknumber(self):
        """Return the highest block number already recorded in InfluxDB, or 0 if none."""
        query = f'SELECT MAX(block_number) from {self.EVENT_MEASUREMENT}'
        points = list(self._influx_client.query(query).get_points())
        if not points:
            return 0
        return points[0]['max']
Beispiel #16
0
class WorkTracker:
    """Periodically tracks the current staking period via a Twisted LoopingCall
    and commits the worker to the next period on-chain when needed.
    """

    CLOCK = reactor
    REFRESH_RATE = 60 * 15  # Fifteen minutes

    def __init__(self, worker, refresh_rate: int = None, *args, **kwargs):
        """
        :param worker: Worker character; must provide ``staking_agent``,
                       ``transacting_power``, ``last_committed_period`` and
                       ``commit_to_next_period()``.
        :param refresh_rate: Seconds between tracking rounds; defaults to
                             REFRESH_RATE when falsy.
        """
        super().__init__(*args, **kwargs)
        self.log = Logger('stake-tracker')
        self.worker = worker
        self.staking_agent = self.worker.staking_agent

        self._refresh_rate = refresh_rate or self.REFRESH_RATE
        self._tracking_task = task.LoopingCall(self._do_work)
        self._tracking_task.clock = self.CLOCK

        self.__requirement = None       # Optional callable gating each commitment
        self.__current_period = None    # Last on-chain period observed
        self.__start_time = NOT_STAKING
        self.__uptime_period = NOT_STAKING
        self._abort_on_error = True     # Crash (vs. warn) on unhandled tracking errors

    @property
    def current_period(self):
        # Last on-chain period observed by _do_work; None before start().
        return self.__current_period

    def stop(self) -> None:
        """Stop the tracking loop if it is running."""
        if self._tracking_task.running:
            self._tracking_task.stop()
            # Plain string: the original used an f-string with no placeholders.
            self.log.info("STOPPED WORK TRACKING")

    def start(self,
              act_now: bool = False,
              requirement_func: Callable = None,
              force: bool = False) -> None:
        """
        High-level stake tracking initialization, this function aims
        to be safely called at any time - For example, it is okay to call
        this function multiple times within the same period.

        :param act_now: If True, run the first tracking round immediately.
        :param requirement_func: Optional zero-argument callable returning a
                                 bool; commitments are skipped while it is False.
        :param force: Restart the loop even if it is already running.
        """
        if self._tracking_task.running and not force:
            return

        # Add optional confirmation requirement callable
        self.__requirement = requirement_func

        # Record the start time and period
        self.__start_time = maya.now()
        self.__uptime_period = self.staking_agent.get_current_period()
        self.__current_period = self.__uptime_period

        # Plain string: the original used an f-string with no placeholders.
        self.log.info("START WORK TRACKING")
        d = self._tracking_task.start(interval=self._refresh_rate, now=act_now)
        d.addErrback(self.handle_working_errors)

    def _crash_gracefully(self, failure=None) -> None:
        """
        A facility for crashing more gracefully in the event that
        an exception is unhandled in a different thread.
        """
        self._crashed = failure
        failure.raiseException()

    def handle_working_errors(self, *args, **kwargs) -> None:
        """Errback for the tracking loop: crash or warn per ``_abort_on_error``."""
        failure = args[0]
        if self._abort_on_error:
            self.log.critical(
                'Unhandled error during node work tracking. {failure!r}',
                failure=failure)
            reactor.callFromThread(self._crash_gracefully, failure=failure)
        else:
            self.log.warn(
                'Unhandled error during work tracking: {failure.getTraceback()!r}',
                failure=failure)

    def __check_work_requirement(self) -> bool:
        # TODO: Check for stake expiration and exit
        if self.__requirement is None:
            return True  # No requirement configured: always allow the commitment.
        try:
            r = self.__requirement()
            if not isinstance(r, bool):
                raise ValueError("'requirement' must return a boolean.")
        except TypeError:
            # NOTE(review): this also masks a TypeError raised *inside* the
            # requirement callable itself -- confirm that is intended.
            raise ValueError("'requirement' must be a callable.")
        return r

    def _do_work(self) -> None:
        """One tracking round: refresh the on-chain period and commit if due."""
        # TODO: #1515 Shut down at end of terminal stake

        # Update on-chain status
        self.log.info(
            f"Checking for new period. Current period is {self.__current_period}"
        )
        onchain_period = self.staking_agent.get_current_period(
        )  # < -- Read from contract
        if self.current_period != onchain_period:
            self.__current_period = onchain_period
            # self.worker.stakes.refresh()  # TODO: #1517 Track stakes for fast access to terminal period.

        # Measure working interval
        interval = onchain_period - self.worker.last_committed_period
        if interval < 0:
            return  # No need to commit to this period.  Save the gas.
        if interval > 0:
            # TODO: #1516 Follow-up actions for downtime
            self.log.warn(
                f"MISSED COMMITMENTS - {interval} missed staking commitments detected."
            )

        # Only perform work this round if the requirements are met
        if not self.__check_work_requirement():
            self.log.warn(
                f'COMMIT PREVENTED (callable: "{self.__requirement.__name__}") - '
                f'There are unmet commit requirements.')
            # TODO: Follow-up actions for downtime
            return

        # Make a Commitment
        # NOTE(review): this is logged before the transaction is actually sent.
        self.log.info("Made a commitment to period {}".format(
            self.current_period))
        transacting_power = self.worker.transacting_power
        with transacting_power:
            self.worker.commit_to_next_period()  # < --- blockchain WRITE
Beispiel #17
0
class EthereumContractRegistry:
    """
    Records known contracts on the disk for future access and utility. This
    lazily writes to the filesystem during contract enrollment.

    WARNING: Unless you are developing NuCypher, you most likely won't ever need
    to use this.
    """

    _multi_contract = True
    _contract_name = NotImplemented

    # Default on-disk location of the JSON registry file.
    _default_registry_filepath = os.path.join(DEFAULT_CONFIG_ROOT,
                                              'contract_registry.json')

    class RegistryError(Exception):
        pass

    class EmptyRegistry(RegistryError):
        pass

    class NoRegistry(RegistryError):
        pass

    class UnknownContract(RegistryError):
        pass

    class IllegalRegistry(RegistryError):
        """Raised when invalid data is encountered in the registry"""

    def __init__(self, registry_filepath: str = None) -> None:
        """
        :param registry_filepath: Path to the registry JSON file; falls back
                                  to ``_default_registry_filepath``.
        """
        self.log = Logger("registry")
        self.__filepath = registry_filepath or self._default_registry_filepath

    @property
    def filepath(self):
        return self.__filepath

    @property
    def enrolled_names(self):
        # Registry records are [name, address, abi] triples; yield the names.
        entries = iter(record[0] for record in self.read())
        return entries

    @property
    def enrolled_addresses(self):
        # Registry records are [name, address, abi] triples; yield the addresses.
        entries = iter(record[1] for record in self.read())
        return entries

    def _swap_registry(self, filepath: str) -> bool:
        """Point this registry at a different file without copying any data."""
        self.__filepath = filepath
        return True

    def _destroy(self) -> None:
        """Delete the registry file from disk."""
        os.remove(self.filepath)

    def write(self, registry_data: list) -> None:
        """
        Writes the registry data list as JSON to the registry file. If no
        file exists, it will create it and write the data. If a file does exist
        it will _overwrite_ everything in it.
        """
        # Mode 'w' creates the file if needed and truncates existing content,
        # making the original 'w+' + seek(0) + truncate() dance unnecessary.
        with open(self.__filepath, 'w') as registry_file:
            registry_file.write(json.dumps(registry_data))

    def read(self) -> Union[list, dict]:
        """
        Reads the registry file and parses the JSON and returns a list.
        If the file is empty it will return an empty list.
        If you are modifying or updating the registry file, you _must_ call
        this function first to get the current state to append to the dict or
        modify it because _write_registry_file overwrites the file.

        :raises NoRegistry: If the registry file does not exist.
        :raises JSONDecodeError: If the file contains invalid JSON.
        """

        try:
            with open(self.__filepath, 'r') as registry_file:
                self.log.debug("Reading from registrar: filepath {}".format(
                    self.__filepath))
                registry_file.seek(0)
                file_data = registry_file.read()
                if file_data:
                    registry_data = json.loads(file_data)
                else:
                    # An empty file is treated as an empty registry.
                    registry_data = list() if self._multi_contract else dict()

        except FileNotFoundError:
            raise self.NoRegistry("No registry at filepath: {}".format(
                self.__filepath))

        except JSONDecodeError:
            # Corrupt JSON is surfaced to the caller unchanged.
            raise

        return registry_data

    def enroll(self, contract_name, contract_address, contract_abi):
        """
        Enrolls a contract to the chain registry by writing the name, address,
        and abi information to the filesystem as JSON.

        Note: Unless you are developing NuCypher, you most likely won't ever
        need to use this.
        """
        contract_data = [contract_name, contract_address, contract_abi]
        try:
            registry_data = self.read()
        except self.RegistryError:
            self.log.info("Blank registry encountered: enrolling {}:{}".format(
                contract_name, contract_address))
            registry_data = list()  # empty registry

        registry_data.append(contract_data)
        self.write(registry_data)
        self.log.info("Enrolled {}:{} into registry {}".format(
            contract_name, contract_address, self.filepath))

    def search(self, contract_name: str = None, contract_address: str = None):
        """
        Searches the registry for a contract with the provided name or address
        and returns the contracts component data.

        :raises ValueError: If both or neither of name/address are given.
        :raises UnknownContract: If no matching record exists.
        :raises IllegalRegistry: If registry data is malformed or ambiguous.
        """
        if not (bool(contract_name) ^ bool(contract_address)):
            raise ValueError(
                "Pass contract_name or contract_address, not both.")

        contracts = list()
        registry_data = self.read()

        try:
            for name, addr, abi in registry_data:
                if contract_name == name or contract_address == addr:
                    contracts.append((name, addr, abi))
        except ValueError:
            # BUG FIX: the original called .format(self.__filepath) on a
            # template with no placeholder, silently dropping the filepath.
            message = "Missing or corrupted registry data: {}".format(
                self.__filepath)
            self.log.critical(message)
            raise self.IllegalRegistry(message)

        if not contracts:
            raise self.UnknownContract(": {}".format(contract_name))

        if contract_address and len(contracts) > 1:
            # BUG FIX: format the message before logging; the original logged
            # the raw template 'Multiple records returned for address {}'.
            message = "Multiple records returned for address {}".format(
                contract_address)
            self.log.critical(message)
            raise self.IllegalRegistry(message)

        # Several records may share one name; an address lookup is unique.
        return contracts if contract_name else contracts[0]
Beispiel #18
0
class BaseContractRegistry(ABC):
    """
    Records known contracts on the disk for future access and utility. This
    lazily writes to the filesystem during contract enrollment.

    WARNING: Unless you are developing NuCypher, you most likely won't ever need
    to use this.
    """

    logger = Logger('ContractRegistry')

    _multi_contract = True
    _contract_name = NotImplemented

    # Registry
    REGISTRY_NAME = 'contract_registry.json'  # TODO: Save registry with ID-time-based filename
    DEVELOPMENT_REGISTRY_NAME = 'dev_contract_registry.json'

    _PUBLICATION_USER = "******"
    _PUBLICATION_REPO = f"{_PUBLICATION_USER}/ethereum-contract-registry"
    _PUBLICATION_BRANCH = 'goerli'          # TODO: Allow other branches to be used

    # Seconds to wait for the registry download before giving up.
    _PUBLICATION_TIMEOUT = 30

    class RegistryError(Exception):
        pass

    class RegistrySourceUnavailable(RegistryError):
        pass

    class EmptyRegistry(RegistryError):
        pass

    class NoRegistry(RegistryError):
        pass

    class UnknownContract(RegistryError):
        pass

    class InvalidRegistry(RegistryError):
        """Raised when invalid data is encountered in the registry"""

    def __init__(self, *args, **kwargs):
        self.log = Logger("registry")

    def __eq__(self, other) -> bool:
        """Registries are equal when their content digests match."""
        if self is other:
            return True  # and that's all
        return bool(self.id == other.id)

    def __repr__(self) -> str:
        r = f"{self.__class__.__name__}(id={self.id[:6]})"
        return r

    @property
    def id(self) -> str:
        """Returns a hexstr of the registry contents."""
        blake = hashlib.blake2b()
        blake.update(json.dumps(self.read()).encode())
        digest = blake.digest().hex()
        return digest

    @abstractmethod
    def _destroy(self) -> None:
        raise NotImplementedError

    @abstractmethod
    def write(self, registry_data: list) -> None:
        raise NotImplementedError

    @abstractmethod
    def read(self) -> Union[list, dict]:
        raise NotImplementedError

    @classmethod
    def get_publication_endpoint(cls) -> str:
        """Return the raw-GitHub URL of the published registry file."""
        url = f'https://raw.githubusercontent.com/{cls._PUBLICATION_REPO}/{cls._PUBLICATION_BRANCH}/{cls.REGISTRY_NAME}'
        return url

    @classmethod
    def fetch_latest_publication(cls) -> bytes:
        """Download and return the latest published registry contents.

        :raises RegistrySourceUnavailable: On any non-200 HTTP response.
        """
        # Setup
        publication_endpoint = cls.get_publication_endpoint()
        cls.logger.debug(f"Downloading contract registry from {publication_endpoint}")
        # ROBUSTNESS: requests has no default timeout; without one a stalled
        # connection would hang this call indefinitely.
        response = requests.get(publication_endpoint, timeout=cls._PUBLICATION_TIMEOUT)

        # Fetch
        if response.status_code != 200:
            error = f"Failed to fetch registry from {publication_endpoint} with status code {response.status_code}"
            raise cls.RegistrySourceUnavailable(error)

        registry_data = response.content
        return registry_data

    @classmethod
    def from_latest_publication(cls, *args, **kwargs) -> 'BaseContractRegistry':
        """
        Get the latest published contract registry from github and save it on the local file system.
        """
        registry_data_bytes = cls.fetch_latest_publication()
        instance = cls(*args, **kwargs)
        instance.write(registry_data=json.loads(registry_data_bytes))
        return instance

    @property
    def enrolled_names(self) -> Iterator:
        # Registry records are [name, address, abi] triples; yield the names.
        entries = iter(record[0] for record in self.read())
        return entries

    @property
    def enrolled_addresses(self) -> Iterator:
        # Registry records are [name, address, abi] triples; yield the addresses.
        entries = iter(record[1] for record in self.read())
        return entries

    def enroll(self, contract_name, contract_address, contract_abi) -> None:
        """
        Enrolls a contract to the chain registry by writing the name, address,
        and abi information to the filesystem as JSON.

        Note: Unless you are developing NuCypher, you most likely won't ever
        need to use this.
        """
        contract_data = [contract_name, contract_address, contract_abi]
        try:
            registry_data = self.read()
        except self.RegistryError:
            self.log.info("Blank registry encountered: enrolling {}:{}".format(contract_name, contract_address))
            registry_data = list()  # empty registry

        registry_data.append(contract_data)
        self.write(registry_data)
        self.log.info("Enrolled {}:{} into registry.".format(contract_name, contract_address))

    def search(self, contract_name: str = None, contract_address: str = None) -> tuple:
        """
        Searches the registry for a contract with the provided name or address
        and returns the contracts component data.

        :raises ValueError: If both or neither of name/address are given.
        :raises UnknownContract: If no matching record exists.
        :raises InvalidRegistry: If registry data is malformed or ambiguous.
        """
        if not (bool(contract_name) ^ bool(contract_address)):
            raise ValueError("Pass contract_name or contract_address, not both.")

        contracts = list()
        registry_data = self.read()

        try:
            for name, addr, abi in registry_data:
                if contract_name == name or contract_address == addr:
                    contracts.append((name, addr, abi))
        except ValueError:
            message = "Missing or corrupted registry data"
            self.log.critical(message)
            raise self.InvalidRegistry(message)

        if not contracts:
            raise self.UnknownContract(contract_name)

        if contract_address and len(contracts) > 1:
            m = f"Multiple records returned for address {contract_address}"
            self.log.critical(m)
            raise self.InvalidRegistry(m)

        # Several records may share one name; an address lookup is unique.
        result = tuple(contracts) if contract_name else contracts[0]
        return result
Beispiel #19
0
class ContractAdministrator(NucypherTokenActor):
    """
    The administrator of network contracts.
    """

    __interface_class = BlockchainDeployerInterface

    #
    # Deployer classes sorted by deployment dependency order.
    #

    standard_deployer_classes = (NucypherTokenDeployer, )

    dispatched_upgradeable_deployer_classes = (
        StakingEscrowDeployer,
        PolicyManagerDeployer,
        AdjudicatorDeployer,
    )

    upgradeable_deployer_classes = (
        *dispatched_upgradeable_deployer_classes,
        UserEscrowProxyDeployer,
    )

    deployer_classes = (*standard_deployer_classes,
                        *upgradeable_deployer_classes)

    class UnknownContract(ValueError):
        pass

    def __init__(self,
                 registry: BaseContractRegistry,
                 deployer_address: str = None,
                 client_password: str = None,
                 economics: TokenEconomics = None):
        """
        Note: super() is not called here to avoid setting the token agent.
        TODO: Review this logic ^^ "bare mode".
        """
        self.log = Logger("Deployment-Actor")

        self.deployer_address = deployer_address
        self.checksum_address = self.deployer_address
        self.economics = economics or StandardTokenEconomics()
        self.registry = registry

        # Index the deployer classes by contract name for name-based lookup.
        self.deployers = {cls.contract_name: cls for cls in self.deployer_classes}
        self.user_escrow_deployers = dict()

        # Unlock and activate the deployer account's transacting power.
        self.transacting_power = TransactingPower(password=client_password,
                                                  account=deployer_address)
        self.transacting_power.activate()

    def __repr__(self):
        # The trailing ')' reproduces the original format string exactly.
        return f'{self.__class__.__name__} - {self.deployer_address})'

    def __get_deployer(self, contract_name: str):
        """Resolve a deployer class by contract name.

        :raises UnknownContract: If no deployer is registered under that name.
        """
        deployer_class = self.deployers.get(contract_name)
        if deployer_class is None:
            raise self.UnknownContract(contract_name)
        return deployer_class

    @staticmethod
    def collect_deployment_secret(deployer) -> str:
        """Interactively prompt (with confirmation) for one contract's upgrade secret."""
        prompt = f'Enter {deployer.contract_name} Deployment Secret'
        return click.prompt(prompt,
                            hide_input=True,
                            confirmation_prompt=True)

    def collect_deployment_secrets(self) -> dict:
        """Prompt for and gather the upgrade secret of every upgradeable contract."""
        return {deployer.contract_name: self.collect_deployment_secret(deployer)
                for deployer in self.upgradeable_deployer_classes}

    def deploy_contract(
        self,
        contract_name: str,
        gas_limit: int = None,
        plaintext_secret: str = None,
        progress=None,
        *args,
        **kwargs,
    ) -> Tuple[dict, ContractDeployer]:
        """Deploy a single contract by name and return (receipts, deployer)."""
        Deployer = self.__get_deployer(contract_name=contract_name)
        deployer = Deployer(registry=self.registry,
                            deployer_address=self.deployer_address,
                            economics=self.economics,
                            *args,
                            **kwargs)

        if not Deployer._upgradeable:
            txhashes = deployer.deploy(gas_limit=gas_limit, progress=progress)
            return txhashes, deployer

        # Upgradeable contracts require a secret whose hash guards upgrades.
        if not plaintext_secret:
            raise ValueError(
                "Upgrade plaintext_secret must be passed to deploy an upgradeable contract."
            )
        secret_hash = keccak(bytes(plaintext_secret, encoding='utf-8'))
        txhashes = deployer.deploy(secret_hash=secret_hash,
                                   gas_limit=gas_limit,
                                   progress=progress)
        return txhashes, deployer

    def upgrade_contract(self, contract_name: str,
                         existing_plaintext_secret: str,
                         new_plaintext_secret: str) -> dict:
        """Upgrade a deployed upgradeable contract, rotating its upgrade secret."""
        Deployer = self.__get_deployer(contract_name=contract_name)
        deployer = Deployer(registry=self.registry,
                            deployer_address=self.deployer_address)
        existing_secret = bytes(existing_plaintext_secret, encoding='utf-8')
        new_secret_hash = keccak(bytes(new_plaintext_secret, encoding='utf-8'))
        return deployer.upgrade(existing_secret_plaintext=existing_secret,
                                new_secret_hash=new_secret_hash)

    def rollback_contract(self, contract_name: str,
                          existing_plaintext_secret: str,
                          new_plaintext_secret: str):
        """Roll back an upgradeable contract to its previous target, rotating the secret."""
        Deployer = self.__get_deployer(contract_name=contract_name)
        deployer = Deployer(registry=self.registry,
                            deployer_address=self.deployer_address)
        existing_secret = bytes(existing_plaintext_secret, encoding='utf-8')
        new_secret_hash = keccak(bytes(new_plaintext_secret, encoding='utf-8'))
        return deployer.rollback(existing_secret_plaintext=existing_secret,
                                 new_secret_hash=new_secret_hash)

    def deploy_user_escrow(self, allocation_registry: AllocationRegistry):
        """Deploy one UserEscrow and remember its deployer keyed by principal address."""
        escrow_deployer = UserEscrowDeployer(
            registry=self.registry,
            deployer_address=self.deployer_address,
            allocation_registry=allocation_registry)
        escrow_deployer.deploy()
        principal_address = escrow_deployer.contract.address
        self.user_escrow_deployers[principal_address] = escrow_deployer
        return escrow_deployer

    def deploy_network_contracts(self,
                                 secrets: dict,
                                 interactive: bool = True,
                                 emitter: StdoutEmitter = None,
                                 etherscan: bool = False) -> dict:
        """
        Deploy all network contracts in dependency order, with optional
        interactive pauses and console progress reporting.

        :param secrets: Contract upgrade secrets dictionary
        :param interactive: If True, wait for keypress after each contract deployment
        :param emitter: A console output emitter instance. If emitter is None, no output will be echoed to the console.
        :param etherscan: Open deployed contracts in Etherscan
        :return: Returns a dictionary of deployment receipts keyed by contract name
        """

        if interactive and not emitter:
            raise ValueError(
                "'emitter' is a required keyword argument when interactive is True."
            )

        deployment_receipts = dict()
        gas_limit = None  # TODO: Gas management

        # deploy contracts
        # Pre-count every deployment transaction so the progress bar length
        # covers all steps of all deployers.
        total_deployment_transactions = 0
        for deployer_class in self.deployer_classes:
            total_deployment_transactions += len(
                deployer_class.deployment_steps)

        first_iteration = True
        with click.progressbar(length=total_deployment_transactions,
                               label="Deployment progress",
                               show_eta=False) as bar:
            bar.short_limit = 0
            for deployer_class in self.deployer_classes:
                # Pause before each contract except the first one.
                if interactive and not first_iteration:
                    click.pause(
                        info=
                        f"\nPress any key to continue with deployment of {deployer_class.contract_name}"
                    )

                if emitter:
                    emitter.echo(
                        f"\nDeploying {deployer_class.contract_name} ...")
                    # Force the bar to re-render after the echo interleaved
                    # other output on the same console.
                    bar._last_line = None
                    bar.render_progress()

                # Standard (non-upgradeable) contracts take no upgrade secret.
                if deployer_class in self.standard_deployer_classes:
                    receipts, deployer = self.deploy_contract(
                        contract_name=deployer_class.contract_name,
                        gas_limit=gas_limit,
                        progress=bar)
                else:
                    receipts, deployer = self.deploy_contract(
                        contract_name=deployer_class.contract_name,
                        plaintext_secret=secrets[deployer_class.contract_name],
                        gas_limit=gas_limit,
                        progress=bar)

                if emitter:
                    blockchain = BlockchainInterfaceFactory.get_interface()
                    paint_contract_deployment(
                        contract_name=deployer_class.contract_name,
                        receipts=receipts,
                        contract_address=deployer.contract_address,
                        emitter=emitter,
                        chain_name=blockchain.client.chain_name,
                        open_in_browser=etherscan)

                deployment_receipts[deployer_class.contract_name] = receipts
                first_iteration = False

        return deployment_receipts

    def relinquish_ownership(self,
                             new_owner: str,
                             emitter: StdoutEmitter = None,
                             interactive: bool = True,
                             transaction_gas_limit: int = None) -> dict:
        """Transfer ownership of every upgradeable contract to `new_owner`.

        Returns a mapping of contract name -> ownership-transfer receipt.
        Raises ValueError if `new_owner` is not a valid EIP-55 checksum address.
        """
        if not is_checksum_address(new_owner):
            raise ValueError(
                f"{new_owner} is an invalid EIP-55 checksum address.")

        receipts = dict()

        for contract_deployer in self.upgradeable_deployer_classes:
            deployer = contract_deployer(
                registry=self.registry, deployer_address=self.deployer_address)
            # FIX: capture the transfer result; the original discarded it and
            # stored the `receipts` dict into itself (a self-referential
            # entry). Assumes transfer_ownership returns the transaction
            # receipt — TODO confirm against the deployer API.
            receipt = deployer.transfer_ownership(
                new_owner=new_owner,
                transaction_gas_limit=transaction_gas_limit)

            if emitter:
                emitter.echo(
                    f"Transferred ownership of {deployer.contract_name} to {new_owner}"
                )

            if interactive:
                click.pause(info="Press any key to continue")

            receipts[contract_deployer.contract_name] = receipt

        return receipts

    def deploy_beneficiary_contracts(
        self,
        allocations: List[Dict[str, Union[str, int]]],
        allocation_outfile: str = None,
        allocation_registry: AllocationRegistry = None,
        crash_on_failure: bool = True,
    ) -> Dict[str, dict]:
        """Deploy a user-escrow contract and deliver tokens per allocation.

        Example allocation dataset (one year is 31536000 seconds):

        data = [{'beneficiary_address': '0xdeadbeef', 'amount': 100, 'duration_seconds': 31536000},
                {'beneficiary_address': '0xabced120', 'amount': 133432, 'duration_seconds': 31536000*2},
                {'beneficiary_address': '0xf7aefec2', 'amount': 999, 'duration_seconds': 31536000*3}]
        """
        if allocation_registry and allocation_outfile:
            raise self.ActorError(
                "Pass either allocation registry or allocation_outfile, not both."
            )
        if allocation_registry is None:
            allocation_registry = AllocationRegistry(
                filepath=allocation_outfile)

        allocation_txhashes = dict()
        failed = list()

        for entry in allocations:
            escrow_deployer = self.deploy_user_escrow(
                allocation_registry=allocation_registry)

            try:
                txhashes = escrow_deployer.deliver(
                    value=entry['amount'],
                    duration=entry['duration_seconds'],
                    beneficiary_address=entry['beneficiary_address'])
            except TransactionFailed:
                if crash_on_failure:
                    raise
                # Best-effort mode: record the failure and keep going.
                self.log.debug(
                    f"Failed allocation transaction for {entry['amount']} to {entry['beneficiary_address']}"
                )
                failed.append(entry)
            else:
                allocation_txhashes[entry['beneficiary_address']] = txhashes

        if failed:
            # TODO: More with these failures: send to isolated logfile, and reattempt
            self.log.critical(
                f"FAILED TOKEN ALLOCATION - {len(failed)} Allocations failed.")

        return allocation_txhashes

    @staticmethod
    def __read_allocation_data(filepath: str) -> list:
        """Read and JSON-decode an allocation data file.

        Propagates JSONDecodeError on malformed input (the original wrapped
        the parse in a try/except that only re-raised — dead code, removed)
        and OSError if the file cannot be opened.
        """
        with open(filepath, 'r') as allocation_file:
            return json.load(allocation_file)

    def deploy_beneficiaries_from_file(self,
                                       allocation_data_filepath: str,
                                       allocation_outfile: str = None) -> dict:
        """Load allocation data from a JSON file and deploy the
        corresponding beneficiary (user-escrow) contracts."""
        parsed_allocations = self.__read_allocation_data(
            filepath=allocation_data_filepath)
        return self.deploy_beneficiary_contracts(
            allocations=parsed_allocations,
            allocation_outfile=allocation_outfile)

    def save_deployment_receipts(self, receipts: dict) -> str:
        """Serialize deployment receipts to a timestamped JSON file.

        `receipts` maps contract name -> {tx_name: receipt_dict}; each
        receipt field is stringified for JSON output. Returns the filepath
        of the written file.
        """
        filename = f'deployment-receipts-{self.deployer_address[:6]}-{maya.now().epoch}.json'
        filepath = os.path.join(DEFAULT_CONFIG_ROOT, filename)
        # TODO: Do not assume default config root
        os.makedirs(DEFAULT_CONFIG_ROOT, exist_ok=True)
        with open(filepath, 'w') as file:
            data = dict()
            # FIX: the original loop shadowed the `receipts` parameter and
            # mapped every tx_name to the last-formatted receipt; format
            # each transaction's own receipt instead.
            for contract_name, contract_receipts in receipts.items():
                data[contract_name] = {
                    tx_name: {field: str(value)
                              for field, value in receipt.items()}
                    for tx_name, receipt in contract_receipts.items()
                }
            file.write(json.dumps(data, indent=4))
        return filepath
# ---- Example #20 ----
class NodeConfiguration:
    """Filesystem-backed configuration for a NuCypher node character.

    Manages the configuration root directory, keyring, node storage,
    known-node metadata and contract registry, and round-trips its static
    configuration payload through a JSON config file.
    """

    _name = 'ursula'
    _character_class = Ursula

    DEFAULT_CONFIG_FILE_LOCATION = os.path.join(DEFAULT_CONFIG_ROOT,
                                                '{}.config'.format(_name))
    DEFAULT_OPERATING_MODE = 'decentralized'
    NODE_SERIALIZER = binascii.hexlify
    NODE_DESERIALIZER = binascii.unhexlify

    __CONFIG_FILE_EXT = '.config'
    __CONFIG_FILE_DESERIALIZER = json.loads
    __TEMP_CONFIGURATION_DIR_PREFIX = "nucypher-tmp-"
    __DEFAULT_NETWORK_MIDDLEWARE_CLASS = RestMiddleware
    __DEFAULT_NODE_STORAGE = LocalFileBasedNodeStorage

    __REGISTRY_NAME = 'contract_registry.json'
    REGISTRY_SOURCE = os.path.join(
        BASE_DIR, __REGISTRY_NAME)  # TODO: #461 Where will this be hosted?

    class ConfigurationError(RuntimeError):
        pass

    class InvalidConfiguration(ConfigurationError):
        pass

    def __init__(
            self,
            temp: bool = False,
            config_root: str = DEFAULT_CONFIG_ROOT,
            passphrase: str = None,
            auto_initialize: bool = False,
            auto_generate_keys: bool = False,
            config_file_location: str = DEFAULT_CONFIG_FILE_LOCATION,
            keyring_dir: str = None,
            checksum_address: str = None,
            is_me: bool = True,
            federated_only: bool = False,
            network_middleware: RestMiddleware = None,
            registry_source: str = REGISTRY_SOURCE,
            registry_filepath: str = None,
            import_seed_registry: bool = False,

            # Learner
            learn_on_same_thread: bool = False,
            abort_on_learning_error: bool = False,
            start_learning_now: bool = True,

            # TLS
            known_certificates_dir: str = None,

            # Metadata
            known_nodes: set = None,
            node_storage: NodeStorage = None,
            load_metadata: bool = True,
            save_metadata: bool = True) -> None:

        # Logs
        self.log = Logger(self.__class__.__name__)

        # Known Nodes: filepaths stay UNINITIALIZED until initialize() or
        # __cache_runtime_filepaths() resolves them.
        self.known_nodes_dir = UNINITIALIZED_CONFIGURATION
        self.known_certificates_dir = known_certificates_dir or UNINITIALIZED_CONFIGURATION

        # Keyring
        self.keyring = UNINITIALIZED_CONFIGURATION
        self.keyring_dir = keyring_dir or UNINITIALIZED_CONFIGURATION

        # Contract Registry
        self.__registry_source = registry_source
        self.registry_filepath = registry_filepath or UNINITIALIZED_CONFIGURATION

        # Configuration Root Directory
        self.config_root = UNINITIALIZED_CONFIGURATION
        self.__temp = temp
        if self.__temp:
            # Temporary mode: node metadata lives in memory; the actual
            # TemporaryDirectory is created later by initialize().
            self.__temp_dir = UNINITIALIZED_CONFIGURATION
            self.node_storage = InMemoryNodeStorage(
                federated_only=federated_only, character_class=self.__class__)
        else:
            self.config_root = config_root
            self.__temp_dir = LIVE_CONFIGURATION
            from nucypher.characters.lawful import Ursula  # TODO : Needs cleanup
            self.node_storage = node_storage or self.__DEFAULT_NODE_STORAGE(
                federated_only=federated_only, character_class=Ursula)
            self.__cache_runtime_filepaths()
        self.config_file_location = config_file_location

        #
        # Identity
        #
        self.federated_only = federated_only
        self.checksum_address = checksum_address
        self.is_me = is_me
        if self.is_me:
            #
            # Self: this configuration describes the local node.
            #
            if checksum_address and not self.__temp:
                self.read_keyring()
            self.network_middleware = network_middleware or self.__DEFAULT_NETWORK_MIDDLEWARE_CLASS(
            )
        else:
            #
            # Stranger: a remote node's configuration; no local state allowed.
            #
            self.known_nodes_dir = STRANGER_CONFIGURATION
            self.known_certificates_dir = STRANGER_CONFIGURATION
            self.node_storage = STRANGER_CONFIGURATION
            self.keyring_dir = STRANGER_CONFIGURATION
            self.keyring = STRANGER_CONFIGURATION
            self.network_middleware = STRANGER_CONFIGURATION
            if network_middleware:
                raise self.ConfigurationError(
                    "Cannot configure a stranger to use network middleware")

        #
        # Learner
        #
        self.known_nodes = known_nodes or set()
        self.learn_on_same_thread = learn_on_same_thread
        self.abort_on_learning_error = abort_on_learning_error
        self.start_learning_now = start_learning_now
        self.save_metadata = save_metadata
        self.load_metadata = load_metadata

        #
        # Auto-Initialization
        #
        if auto_initialize:
            # Note precedence: (not import_seed_registry) or federated_only
            self.initialize(no_registry=not import_seed_registry
                            or federated_only,
                            wallet=auto_generate_keys and not federated_only,
                            encrypting=auto_generate_keys,
                            passphrase=passphrase)

    def __call__(self, *args, **kwargs):
        """Alias for produce()."""
        return self.produce(*args, **kwargs)

    def cleanup(self) -> None:
        """Remove the temporary configuration directory, if one was created."""
        if self.__temp:
            self.__temp_dir.cleanup()

    @property
    def temp(self):
        # True when this configuration lives in a temporary directory.
        return self.__temp

    def produce(self, passphrase: str = None, **overrides):
        """Initialize a new character instance and return it"""
        if not self.temp:
            self.read_keyring()
            self.keyring.unlock(passphrase=passphrase)
        merged_parameters = {
            **self.static_payload,
            **self.dynamic_payload,
            **overrides
        }
        return self._character_class(**merged_parameters)

    @staticmethod
    def _read_configuration_file(filepath) -> dict:
        """Deserialize a JSON configuration file into a dict payload."""
        with open(filepath, 'r') as file:
            payload = NodeConfiguration.__CONFIG_FILE_DESERIALIZER(file.read())
        return payload

    @classmethod
    def from_configuration_file(cls, filepath,
                                **overrides) -> 'NodeConfiguration':
        """Initialize a NodeConfiguration from a JSON file."""
        from nucypher.config.storages import NodeStorage  # TODO: move
        NODE_STORAGES = {
            storage_class._name: storage_class
            for storage_class in NodeStorage.__subclasses__()
        }

        payload = cls._read_configuration_file(filepath=filepath)

        # Reconstruct the NodeStorage named in the payload.
        storage_payload = payload['node_storage']
        storage_type = storage_payload[NodeStorage._TYPE_LABEL]
        storage_class = NODE_STORAGES[storage_type]
        node_storage = storage_class.from_payload(
            payload=storage_payload,
            character_class=cls._character_class,
            federated_only=payload['federated_only'],
            serializer=cls.NODE_SERIALIZER,
            deserializer=cls.NODE_DESERIALIZER)

        payload.update(dict(node_storage=node_storage))
        return cls(is_me=True, **{**payload, **overrides})

    def to_configuration_file(self, filepath: str = None) -> str:
        """Write the static_payload to a JSON file."""
        if filepath is None:
            filename = '{}{}'.format(self._name.lower(),
                                     self.__CONFIG_FILE_EXT)
            filepath = os.path.join(self.config_root, filename)

        payload = self.static_payload
        del payload['is_me']  # TODO
        # Save node connection data
        payload.update(dict(node_storage=self.node_storage.payload()))

        with open(filepath, 'w') as config_file:
            config_file.write(json.dumps(payload, indent=4))
        return filepath

    def validate(self, config_root: str, no_registry=False) -> bool:
        """Verify that all runtime filepaths exist on disk.

        Raises ConfigurationError / InvalidConfiguration on a missing path;
        returns True when everything checks out.
        """
        # Top-level
        if not os.path.exists(config_root):
            raise self.ConfigurationError(
                'No configuration directory found at {}.'.format(config_root))

        # Sub-paths (covers both files and directories)
        filepaths = self.runtime_filepaths
        if no_registry:
            del filepaths['registry_filepath']

        for field, path in filepaths.items():
            if not os.path.exists(path):
                message = 'Missing configuration directory {}.'
                raise NodeConfiguration.InvalidConfiguration(
                    message.format(path))
        return True

    @property
    def static_payload(self) -> dict:
        """Exported static configuration values for initializing Ursula"""
        payload = dict(
            # Identity
            is_me=self.is_me,
            federated_only=self.federated_only,  # TODO: 466
            checksum_address=self.checksum_address,
            keyring_dir=self.keyring_dir,
            known_certificates_dir=self.known_certificates_dir,

            # Behavior
            learn_on_same_thread=self.learn_on_same_thread,
            abort_on_learning_error=self.abort_on_learning_error,
            start_learning_now=self.start_learning_now,
            save_metadata=self.save_metadata)
        return payload

    @property
    def dynamic_payload(self) -> dict:
        """Exported dynamic configuration values for initializing Ursula"""
        # FIX: the original declared **overrides on this property; properties
        # are accessed without call syntax, so overrides could never be
        # supplied — the dead parameter and its branch have been removed.
        if self.load_metadata:
            self.known_nodes.update(
                self.node_storage.all(federated_only=self.federated_only))
        payload = dict(network_middleware=self.network_middleware
                       or self.__DEFAULT_NETWORK_MIDDLEWARE_CLASS(),
                       known_nodes=self.known_nodes,
                       node_storage=self.node_storage,
                       crypto_power_ups=self.derive_node_power_ups() or None)
        return payload

    @property
    def runtime_filepaths(self):
        """Currently-cached filepaths used at runtime."""
        filepaths = dict(config_root=self.config_root,
                         keyring_dir=self.keyring_dir,
                         known_certificates_dir=self.known_certificates_dir,
                         registry_filepath=self.registry_filepath)
        return filepaths

    @staticmethod
    def generate_runtime_filepaths(config_root: str) -> dict:
        """Dynamically generate paths based on configuration root directory"""
        known_nodes_dir = os.path.join(config_root, 'known_nodes')
        filepaths = dict(
            config_root=config_root,
            keyring_dir=os.path.join(config_root, 'keyring'),
            known_nodes_dir=known_nodes_dir,
            known_certificates_dir=os.path.join(known_nodes_dir,
                                                'certificates'),
            registry_filepath=os.path.join(config_root,
                                           NodeConfiguration.__REGISTRY_NAME))
        return filepaths

    def __cache_runtime_filepaths(self) -> None:
        """Generate runtime filepaths and cache them on the config object"""
        filepaths = self.generate_runtime_filepaths(
            config_root=self.config_root)
        for field, filepath in filepaths.items():
            # Only fill in paths that were not explicitly provided.
            if getattr(self, field) is UNINITIALIZED_CONFIGURATION:
                setattr(self, field, filepath)

    def derive_node_power_ups(self) -> List[CryptoPowerUp]:
        """Derive the character's default crypto powers from the keyring."""
        power_ups = list()
        if self.is_me and not self.temp:
            for power_class in self._character_class._default_crypto_powerups:
                power_up = self.keyring.derive_crypto_power(power_class)
                power_ups.append(power_up)
        return power_ups

    def initialize(self,
                   passphrase: str,
                   no_registry: bool = False,
                   wallet: bool = False,
                   encrypting: bool = False,
                   tls: bool = False,
                   host: str = None,
                   curve=None,
                   no_keys: bool = False) -> str:
        """Write a new configuration to the disk, and with the configured node store."""

        #
        # Create Config Root
        #
        if self.__temp:
            self.__temp_dir = TemporaryDirectory(
                prefix=self.__TEMP_CONFIGURATION_DIR_PREFIX)
            self.config_root = self.__temp_dir.name
        else:
            try:
                os.mkdir(self.config_root, mode=0o755)
            except FileExistsError:
                message = "There are existing configuration files at {}".format(
                    self.config_root)
                raise self.ConfigurationError(message)
            except FileNotFoundError:
                message = "Cannot write configuration files because the directory {} does not exist."
                raise self.ConfigurationError(message)

        #
        # Create Config Subdirectories
        #
        self.__cache_runtime_filepaths()
        try:

            # Directories
            os.mkdir(self.keyring_dir, mode=0o700)  # keyring
            os.mkdir(self.known_nodes_dir, mode=0o755)  # known_nodes
            os.mkdir(self.known_certificates_dir, mode=0o755)  # known_certs
            self.node_storage.initialize()  # TODO: default known dir

            if not self.temp and not no_keys:
                # Keyring
                self.write_keyring(passphrase=passphrase,
                                   wallet=wallet,
                                   encrypting=encrypting,
                                   tls=tls,
                                   host=host,
                                   tls_curve=curve)

            # Registry
            if not no_registry and not self.federated_only:
                self.write_registry(output_filepath=self.registry_filepath,
                                    source=self.__registry_source,
                                    blank=no_registry)

        except FileExistsError:
            existing_paths = [
                os.path.join(self.config_root, f)
                for f in os.listdir(self.config_root)
            ]
            message = "There are pre-existing nucypher installation files at {}: {}".format(
                self.config_root, existing_paths)
            self.log.critical(message)
            raise NodeConfiguration.ConfigurationError(message)

        if not self.__temp:
            self.validate(config_root=self.config_root,
                          no_registry=no_registry or self.federated_only)
        return self.config_root

    def read_known_nodes(self):
        """Load all stored node metadata into the known_nodes set."""
        self.known_nodes.update(
            self.node_storage.all(federated_only=self.federated_only))
        return self.known_nodes

    def read_keyring(self, *args, **kwargs):
        """Attach a NucypherKeyring for the configured account."""
        if self.checksum_address is None:
            raise self.ConfigurationError(
                "No account specified to unlock keyring")
        self.keyring = NucypherKeyring(keyring_root=self.keyring_dir,
                                       account=self.checksum_address,
                                       *args,
                                       **kwargs)

    def write_keyring(
        self,
        passphrase: str,
        encrypting: bool,
        wallet: bool,
        tls: bool,
        host: str,
        tls_curve: EllipticCurve = None,
    ) -> NucypherKeyring:
        """Generate a new keyring and adopt its address/certificate."""

        self.keyring = NucypherKeyring.generate(passphrase=passphrase,
                                                encrypting=encrypting,
                                                wallet=wallet,
                                                tls=tls,
                                                host=host,
                                                curve=tls_curve,
                                                keyring_root=self.keyring_dir)

        # TODO: Operating mode switch #466
        if self.federated_only or not wallet:
            self.checksum_address = self.keyring.federated_address
        else:
            self.checksum_address = self.keyring.checksum_address
        if tls:
            self.certificate_filepath = self.keyring.certificate_filepath

        return self.keyring

    def write_registry(self,
                       output_filepath: str = None,
                       source: str = None,
                       force: bool = False,
                       blank=False) -> str:
        """Copy-validate or blank-write the contract registry file."""

        # FIX: resolve defaults before the existence check so a None
        # output_filepath cannot reach os.path.isfile (the original checked
        # the raw argument first, which raised TypeError with force=True).
        output_filepath = output_filepath or self.registry_filepath
        source = source or self.REGISTRY_SOURCE

        # NOTE(review): raising when force is True looks inverted ("force"
        # usually means overwrite) — preserved as-is pending confirmation.
        if force and os.path.isfile(output_filepath):
            raise self.ConfigurationError(
                'There is an existing file at the registry output_filepath {}'.
                format(output_filepath))

        if not blank and not self.temp:
            # Validate Registry
            with open(source, 'r') as registry_file:
                try:
                    json.loads(registry_file.read())
                except JSONDecodeError:
                    message = "The registry source {} is not valid JSON".format(
                        source)
                    self.log.critical(message)
                    raise self.ConfigurationError(message)
                else:
                    self.log.debug(
                        "Source registry {} is valid JSON".format(source))

        else:
            self.log.warn("Writing blank registry")
            open(output_filepath, 'w').close()  # write blank

        self.log.info(
            "Successfully wrote registry to {}".format(output_filepath))
        return output_filepath
# ---- Example #21 ----
class Rest(object):
    """Twisted-based REST client with optional Server-Sent-Events streaming.

    Requests target `host` (or a cached redirect location in `self.loc`),
    appending the auth token as a query parameter. Unknown attribute access
    yields a `RestCall` bound to the named endpoint.
    """

    def __init__(
            self,
            host='https://developer-api.nest.com',
            token=None,
            event_handler=None,
            net_type='lan'):
        self.log = Logger()
        self.host = host
        self.token = token
        self.event_handler = event_handler
        self.pool = HTTPConnectionPool(reactor, persistent=True)
        self.loc = None  # cached redirect base URL (set on HTTP 307)
        self.reconnect = False
        self.fail_count = 0
        if event_handler:
            # Open a persistent event stream and re-open it on disconnect.
            self.reconnect = True
            d = self.request(headers={'User-Agent': ['onDemand Rest Client'],
                                      'Accept': ['text/event-stream']})
            d.addCallback(self.on_disconnect)

    def __getattr__(self, name):
        # object defines no __getattr__, so the super() call always raises
        # AttributeError and every unknown attribute becomes a RestCall.
        try:
            super(Rest, self).__getattr__(name)
        except AttributeError:
            return RestCall(self, name)

    def on_disconnect(self, reason):
        """Log a dropped event stream and reconnect if configured to."""
        if not reason:
            reason = {'reason': 'no_message'}
        self.log.critical(
            'disconnected: {reason}', reason=reason['reason'])
        if self.fail_count > 10:
            # NOTE(review): this only logs — the reconnect below still runs.
            # Confirm whether the connection should actually be aborted here.
            self.log.error('Max error count reached, aborting connection')

        def test_connectivity(count):
            # No further failures in the last 10 seconds: reset the counter.
            if self.fail_count == count:
                self.fail_count = 0

        self.fail_count += 1
        c = self.fail_count
        reactor.callLater(10, test_connectivity, c)  # @UndefinedVariable
        if self.reconnect:
            d = self.request(headers={'User-Agent': ['onDemand Rest Client'],
                                      'Accept': ['text/event-stream']})
            d.addCallback(self.on_disconnect)

    def request(self, method='GET',
                path='',
                headers=None,
                body=None):
        """Issue an HTTP request and deliver the body to a RestHandle.

        Handles 307 redirects (caching the new base URL in self.loc) and
        retries once against the primary host when a cached redirect
        location returns 404.
        """
        # FIX: the original used a shared mutable dict as the default for
        # `headers`; it was mutated below (Content-Type) and leaked state
        # across calls. Build a fresh default per call instead.
        if headers is None:
            headers = {'User-Agent': ['onDemand/1.0 (Rest_Client)'],
                       'Accept': ['application/json']}

        data = None
        if self.loc:
            host = '/'.join((self.loc, path))
        else:
            host = '/'.join((self.host, path))
        if self.token:
            host += '?auth=' + self.token
        if body:
            headers.update({'Content-Type': ['application/json']})
            data = FileBodyProducer(StringIO(json.dumps(body)))
        agent = RedirectAgent(Agent(reactor, pool=self.pool))
        d = agent.request(method, host, Headers(headers), data)

        def cbFail(fail):

            if hasattr(fail.value, 'response'):
                if hasattr(fail.value.response, 'code'):
                    if fail.value.response.code == 307:
                        loc = fail.value.response.headers.getRawHeaders(
                            'location')
                        new = urlparse(loc[0])
                        newhost = '://'.join((new.scheme, new.netloc))
                        if newhost == self.host:
                            self.loc = None
                        else:
                            self.loc = newhost
                        self.log.debug('redirect: %s' % self.loc)
                        data = FileBodyProducer(StringIO(json.dumps(body)))
                        d = agent.request(
                            method, loc[0], Headers(headers), data)
                        d.addCallbacks(cbRequest, cbFail)
                        return d
                    elif fail.value.response.code == 404 and self.loc:
                        # Cached redirect went stale: drop it and retry the
                        # original path against the primary host.
                        # FIX: the original passed a full URL as `path` and
                        # a Headers instance where request() expects a plain
                        # dict (request() builds the URL and Headers itself).
                        self.loc = None
                        d = self.request(method, path, headers, body)
                        d.addCallbacks(cbRequest, cbFail)
                        return d
                else:
                    print(dir(fail.value))
                    print(fail.value.message)
                    print(fail.value.args)

            self.log.error('unhandled failure: %s -- %s' % (
                fail.value.message, fail.value))

        def cbRequest(response):
            # Stream the response body into a RestHandle; `finished` fires
            # when the response completes.
            finished = Deferred()
            response.deliverBody(RestHandle(finished, self.event_handler))
            return finished
        d.addCallbacks(cbRequest, cbFail)
        return d
# ---- Example #22 ----
class BaseContractRegistry(ABC):
    """
    Disk-backed record of known contracts, written lazily during contract
    enrollment, for later lookup and tooling.

    WARNING: Unless you are developing NuCypher, you most likely won't ever need
    to use this.
    """

    logger = Logger('ContractRegistry')

    _multi_contract = True
    _contract_name = NotImplemented

    # Registry
    REGISTRY_NAME = 'contract_registry.json'  # TODO: #1511 Save registry with ID-time-based filename
    DEVELOPMENT_REGISTRY_NAME = 'dev_contract_registry.json'

    class RegistryError(Exception):
        pass

    class EmptyRegistry(RegistryError):
        pass

    class NoRegistry(RegistryError):
        pass

    class UnknownContract(RegistryError):
        pass

    class InvalidRegistry(RegistryError):
        """Raised when invalid data is encountered in the registry"""

    class CantOverwriteRegistry(RegistryError):
        pass

    def __init__(self, source=NO_REGISTRY_SOURCE, *args, **kwargs):
        # Remember where this registry's contents were obtained from.
        self.log = Logger("registry")
        self.__source = source

    def __eq__(self, other) -> bool:
        # Fast path on identity; otherwise compare content digests.
        return self is other or bool(self.id == other.id)

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}(id={self.id[:6]})"

    @property
    def id(self) -> str:
        """Returns a hexstr of the registry contents."""
        hasher = hashlib.blake2b()
        hasher.update(self.__class__.__name__.encode())
        hasher.update(json.dumps(self.read()).encode())
        return hasher.digest().hex()

    @abstractmethod
    def _destroy(self) -> None:
        raise NotImplementedError

    @abstractmethod
    def write(self, registry_data: list) -> None:
        raise NotImplementedError

    @abstractmethod
    def read(self) -> Union[list, dict]:
        raise NotImplementedError

    @classmethod
    def from_latest_publication(cls,
                                *args,
                                source_manager=None,
                                network: str = NetworksInventory.DEFAULT,
                                **kwargs) -> 'BaseContractRegistry':
        """
        Get the latest contract registry available from a registry source chain.
        """
        manager = source_manager or RegistrySourceManager()
        registry_data, origin = manager.fetch_latest_publication(
            registry_class=cls, network=network)

        instance = cls(*args, source=origin, **kwargs)
        instance.write(registry_data=json.loads(registry_data))
        return instance

    @property
    def source(self) -> 'CanonicalRegistrySource':
        return self.__source

    @property
    def enrolled_names(self) -> Iterator:
        # Lazily yield the name field of every registry record.
        return (record[0] for record in self.read())

    @property
    def enrolled_addresses(self) -> Iterator:
        # Lazily yield the address field of every registry record.
        return (record[1] for record in self.read())

    def enroll(self, contract_name, contract_address, contract_abi,
               contract_version) -> None:
        """
        Enrolls a contract to the chain registry by writing the name, address,
        and abi information to the filesystem as JSON.

        Note: Unless you are developing NuCypher, you most likely won't ever
        need to use this.
        """
        record = [
            contract_name, contract_version, contract_address, contract_abi
        ]
        try:
            registry_data = self.read()
        except self.RegistryError:
            # First enrollment into a blank registry.
            registry_data = list()
            self.log.info(
                "Blank registry encountered: enrolling {}:{}:{}".format(
                    contract_name, contract_version, contract_address))

        registry_data.append(record)
        self.write(registry_data)
        self.log.info("Enrolled {}:{}:{} into registry.".format(
            contract_name, contract_version, contract_address))

    def search(self,
               contract_name: str = None,
               contract_version: str = None,
               contract_address: str = None) -> tuple:
        """
        Searches the registry for a contract with the provided name or address
        and returns the contracts component data.
        """
        if not (bool(contract_name) ^ bool(contract_address)):
            raise ValueError(
                "Pass contract_name or contract_address, not both.")
        if bool(contract_version) and not bool(contract_name):
            raise ValueError(
                "Pass contract_version together with contract_name.")

        matches = list()
        registry_data = self.read()

        try:
            for record in registry_data:
                # Legacy three-field records carry no version.
                if len(record) == 3:
                    name, address, abi = record
                    version = None
                else:
                    name, version, address, abi = record
                name_matches = (contract_name == name
                                and (contract_version is None
                                     or version == contract_version))
                if name_matches or contract_address == address:
                    matches.append((name, version, address, abi))
        except ValueError:
            message = "Missing or corrupted registry data"
            self.log.critical(message)
            raise self.InvalidRegistry(message)

        if not matches:
            raise self.UnknownContract(contract_name)

        if contract_address and len(matches) > 1:
            m = f"Multiple records returned for address {contract_address}"
            self.log.critical(m)
            raise self.InvalidRegistry(m)

        return tuple(matches) if contract_name else matches[0]
# ---- Example #23 ----
class BlockchainInterface:
    """
    Interacts with a solidity compiler and a registry in order to instantiate compiled
    ethereum contracts with the given web3 provider backend.
    """

    TIMEOUT = 600  # seconds  # TODO: Correlate with the gas strategy - #2070

    # Human-readable names mapped onto web3's time-based gas price strategies;
    # the trailing comments give the target confirmation window of each one.
    DEFAULT_GAS_STRATEGY = 'medium'
    GAS_STRATEGIES = {
        'glacial': time_based.glacial_gas_price_strategy,  # 24h
        'slow': time_based.slow_gas_price_strategy,  # 1h
        'medium': time_based.medium_gas_price_strategy,  # 5m
        'fast': time_based.fast_gas_price_strategy  # 60s
    }

    process = NO_PROVIDER_PROCESS.bool_value(False)
    Web3 = Web3

    # Contract factory used for all w3.eth.contract instantiations below.
    _contract_factory = VersionedContract

    class InterfaceError(Exception):
        # Base class for all errors raised by this interface.
        pass

    class NoProvider(InterfaceError):
        pass

    class UnsupportedProvider(InterfaceError):
        pass

    class ConnectionFailed(InterfaceError):
        pass

    class UnknownContract(InterfaceError):
        pass

    # Known client error strings, keyed by internal reason constants.
    REASONS = {
        INSUFFICIENT_ETH: 'insufficient funds for gas * price + value',
    }

    class TransactionFailed(InterfaceError):
        """
        Raised when a transaction fails to build or broadcast; formats a
        reason-specific human-readable message from the transaction payload.
        """

        IPC_CODE = -32000  # (geth)

        def __init__(self, message: str, transaction_dict: dict,
                     contract_function: Union[ContractFunction,
                                              ContractConstructor], *args):
            # Keep the raw client message and payload around so the
            # properties below can synthesize richer messages lazily.
            self.base_message = message
            self.name = get_transaction_name(
                contract_function=contract_function)
            self.payload = transaction_dict
            self.contract_function = contract_function
            # Dispatch table: known client messages -> specialized formatter.
            self.failures = {
                BlockchainInterface.REASONS[INSUFFICIENT_ETH]:
                self.insufficient_eth
            }
            self.message = self.failures.get(self.base_message, self.default)
            super().__init__(self.message, *args)

        @property
        def default(self) -> str:
            """Generic failure message: tx name, truncated sender, raw reason."""
            message = f'{self.name} from {self.payload["from"][:6]} - {self.base_message}'
            return message

        @property
        def insufficient_eth(self) -> str:
            """
            Message for insufficient-funds failures, including the
            estimated cost versus the sender's current balance.
            """
            gas = (self.payload.get('gas', 1) * self.payload['gasPrice']
                   )  # FIXME: If gas is not included...
            cost = gas + self.payload.get('value', 0)
            blockchain = BlockchainInterfaceFactory.get_interface()
            balance = blockchain.client.get_balance(
                account=self.payload['from'])
            message = f'{self.payload} from {self.payload["from"][:8]} - {self.base_message}.' \
                      f'Calculated cost is {cost} but sender only has {balance}.'
            return message

    def __init__(
            self,
            emitter=None,  # TODO # 1754
            poa: bool = None,
            light: bool = False,
            provider_process=NO_PROVIDER_PROCESS,
            provider_uri: str = NO_BLOCKCHAIN_CONNECTION,
            provider: Web3Providers = NO_BLOCKCHAIN_CONNECTION,
            gas_strategy: Union[str, Callable] = DEFAULT_GAS_STRATEGY):
        """
        A blockchain "network interface"; the circumflex wraps entirely around the bounds of
        contract operations including compilation, deployment, and execution.

        TODO: #1502 - Move to API docs.

         Filesystem          Configuration           Node              Client                  EVM
        ================ ====================== =============== =====================  ===========================

         Solidity Files -- SolidityCompiler -                      --- HTTPProvider ------ ...
                                            |                    |
                                            |                    |
                                            |                    |
                                            - *BlockchainInterface* -- IPCProvider ----- External EVM (geth, parity...)
                                                       |         |
                                                       |         |
                                                 TestProvider ----- EthereumTester -------------
                                                                                                |
                                                                                                |
                                                                                        PyEVM (Development Chain)

         ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

         Runtime Files --                 --BlockchainInterface ----> Registry
                        |                |             ^
                        |                |             |
                        |                |             |
         Key Files ------ CharacterConfiguration     Agent                          ... (Contract API)
                        |                |             ^
                        |                |             |
                        |                |             |
                        |                |           Actor                          ...Blockchain-Character API)
                        |                |             ^
                        |                |             |
                        |                |             |
         Config File ---                  --------- Character                       ... (Public API)
                                                       ^
                                                       |
                                                     Human


        The Blockchain is the junction of the solidity compiler, a contract registry, and a collection of
        web3 network providers as a means of interfacing with the ethereum blockchain to execute
        or deploy contract code on the network.


        Compiler and Registry Usage
        -----------------------------

        Contracts are freshly re-compiled if an instance of SolidityCompiler is passed; otherwise,
        The registry will read contract data saved to disk that is be used to retrieve contact address and op-codes.
        Optionally, A registry instance can be passed instead.


        Provider Usage
        ---------------
        https: // github.com / ethereum / eth - tester     # available-backends


        * HTTP Provider - Web3 HTTP provider, typically JSON RPC 2.0 over HTTP
        * Websocket Provider - Web3 WS provider, typically JSON RPC 2.0 over WS, supply endpoint uri and websocket=True
        * IPC Provider - Web3 File based IPC provider transported over standard I/O
        * Custom Provider - A pre-initialized web3.py provider instance to attach to this interface

        """

        self.log = Logger('Blockchain')
        self.poa = poa
        self.provider_uri = provider_uri
        self._provider = provider
        self._provider_process = provider_process
        # w3/client stay sentinel-valued until connect() succeeds.
        self.w3 = NO_BLOCKCHAIN_CONNECTION
        self.client = NO_BLOCKCHAIN_CONNECTION  # type: EthereumClient
        # Without an activated TransactingPower this interface is read-only.
        self.transacting_power = READ_ONLY_INTERFACE
        self.is_light = light
        self.gas_strategy = self.get_gas_strategy(gas_strategy)

    def __repr__(self):
        """Return e.g. ``BlockchainInterface(<provider_uri>)``."""
        r = '{name}({uri})'.format(name=self.__class__.__name__,
                                   uri=self.provider_uri)
        return r

    @classmethod
    def from_dict(cls, payload: dict, **overrides) -> 'BlockchainInterface':
        """
        Instantiate from a keyword payload (see ``to_dict``); non-None
        overrides replace payload entries.  NOTE(review): mutates the
        passed-in payload dict in place.
        """
        payload.update({k: v for k, v in overrides.items() if v is not None})
        blockchain = cls(**payload)
        return blockchain

    def to_dict(self) -> dict:
        """Serialize the construction parameters round-trippable via ``from_dict``."""
        payload = dict(provider_uri=self.provider_uri,
                       poa=self.poa,
                       light=self.is_light)
        return payload

    @property
    def is_connected(self) -> bool:
        """
        True iff ``connect()`` produced a client and that client reports connectivity.
        https://web3py.readthedocs.io/en/stable/__provider.html#examples-using-automated-detection
        """
        if self.client is NO_BLOCKCHAIN_CONNECTION:
            return False
        return self.client.is_connected

    @classmethod
    def get_gas_strategy(cls, gas_strategy: Union[str, Callable]) -> Callable:
        """
        Resolve a strategy name from ``GAS_STRATEGIES`` or accept a custom
        callable; falls back to the default strategy for falsy input.
        Raises ValueError for a truthy, unknown, non-callable value.
        """
        try:
            gas_strategy = cls.GAS_STRATEGIES[gas_strategy]
        except KeyError:
            if gas_strategy and not callable(gas_strategy):
                raise ValueError(
                    f"{gas_strategy} must be callable to be a valid gas strategy."
                )
            else:
                gas_strategy = cls.GAS_STRATEGIES[cls.DEFAULT_GAS_STRATEGY]
        return gas_strategy

    def attach_middleware(self):
        """
        Install POA middleware (explicit or autodetected from chain id),
        the configured gas price strategy, and web3 caching middlewares
        on the connected client.
        """
        if self.poa is None:  # If POA is not set explicitly, try to autodetect from chain id
            chain_id = int(self.client.chain_id)
            self.poa = chain_id in POA_CHAINS
            self.log.debug(
                f'Autodetecting POA chain ({self.client.chain_name})')

        # For use with Proof-Of-Authority test-blockchains
        if self.poa is True:
            self.log.debug('Injecting POA middleware at layer 0')
            self.client.inject_middleware(geth_poa_middleware, layer=0)

        # Gas Price Strategy
        self.client.w3.eth.setGasPriceStrategy(self.gas_strategy)
        self.client.w3.middleware_onion.add(
            middleware.time_based_cache_middleware)
        self.client.w3.middleware_onion.add(
            middleware.latest_block_based_cache_middleware)
        self.client.w3.middleware_onion.add(middleware.simple_cache_middleware)

    def connect(self):
        """
        Start the (optional) child provider process, attach a provider,
        build the Web3 instance and client, and install middleware.
        Returns ``is_connected``; raises NoProvider/ConnectionFailed on error.
        """
        # Spawn child process
        if self._provider_process:
            self._provider_process.start()
            provider_uri = self._provider_process.provider_uri(scheme='file')
        else:
            provider_uri = self.provider_uri
            self.log.info(
                f"Using external Web3 Provider '{self.provider_uri}'")

        # Attach Provider
        self._attach_provider(provider=self._provider,
                              provider_uri=provider_uri)
        self.log.info("Connecting to {}".format(self.provider_uri))
        if self._provider is NO_BLOCKCHAIN_CONNECTION:
            raise self.NoProvider(
                "There are no configured blockchain providers")

        # Connect if not connected
        try:
            self.w3 = self.Web3(provider=self._provider)
            self.client = EthereumClient.from_w3(w3=self.w3)
        except requests.ConnectionError:  # RPC
            raise self.ConnectionFailed(
                f'Connection Failed - {str(self.provider_uri)} - is RPC enabled?'
            )
        except FileNotFoundError:  # IPC File Protocol
            raise self.ConnectionFailed(
                f'Connection Failed - {str(self.provider_uri)} - is IPC enabled?'
            )
        else:
            self.attach_middleware()

        return self.is_connected

    def sync(self, emitter=None) -> None:
        """
        Drive the client's sync generator to completion.  With an emitter,
        waits for peers and renders a progress bar; without one, logs
        progress.  Blocks until syncing finishes (or was not needed).
        """
        sync_state = self.client.sync()
        if emitter is not None:

            emitter.echo(
                f"Syncing: {self.client.chain_name.capitalize()}. Waiting for sync to begin.",
                verbosity=1)

            while not len(self.client.peers):
                emitter.echo("waiting for peers...", verbosity=1)
                time.sleep(5)

            peer_count = len(self.client.peers)
            emitter.echo(
                f"Found {'an' if peer_count == 1 else peer_count} Ethereum peer{('s' if peer_count > 1 else '')}.",
                verbosity=1)

            try:
                emitter.echo("Beginning sync...", verbosity=1)
                initial_state = next(sync_state)
            except StopIteration:  # will occur if no syncing needs to happen
                emitter.echo("Local blockchain data is already synced.",
                             verbosity=1)
                return

            prior_state = initial_state
            total_blocks_to_sync = int(initial_state.get(
                'highestBlock', 0)) - int(initial_state.get('currentBlock', 0))
            with click.progressbar(
                    length=total_blocks_to_sync,
                    label="sync progress",
                    file=emitter.get_stream(verbosity=1)) as bar:
                for syncdata in sync_state:
                    if syncdata:
                        blocks_accomplished = int(
                            syncdata['currentBlock']) - int(
                                prior_state.get('currentBlock', 0))
                        bar.update(blocks_accomplished)
                        prior_state = syncdata
        else:
            try:
                for syncdata in sync_state:
                    self.client.log.info(
                        f"Syncing {syncdata['currentBlock']}/{syncdata['highestBlock']}"
                    )
            except TypeError:  # it's already synced
                return
        return

    @property
    def provider(self) -> Union[IPCProvider, WebsocketProvider, HTTPProvider]:
        """The currently attached web3 provider instance (read-only)."""
        return self._provider

    def _attach_provider(self,
                         provider: Web3Providers = None,
                         provider_uri: str = None) -> None:
        """
        Select and attach a web3 provider.  A pre-built ``provider`` wins;
        otherwise the URI scheme chooses a factory ('tester://' schemes map
        to test backends, bare file paths are auto-detected as IPC).
        https://web3py.readthedocs.io/en/latest/providers.html#providers
        """

        if not provider_uri and not provider:
            raise self.NoProvider("No URI or provider instances supplied.")

        if provider_uri and not provider:
            uri_breakdown = urlparse(provider_uri)

            if uri_breakdown.scheme == 'tester':
                providers = {
                    'pyevm': _get_pyevm_test_provider,
                    'geth': _get_test_geth_parity_provider,
                    'parity-ethereum': _get_test_geth_parity_provider,
                    'mock': _get_mock_test_provider
                }
                # e.g. tester://pyevm - the backend name is the netloc.
                provider_scheme = uri_breakdown.netloc

            else:
                providers = {
                    'auto': _get_auto_provider,
                    'infura': _get_infura_provider,
                    'ipc': _get_IPC_provider,
                    'file': _get_IPC_provider,
                    'ws': _get_websocket_provider,
                    'wss': _get_websocket_provider,
                    'http': _get_HTTP_provider,
                    'https': _get_HTTP_provider,
                }
                provider_scheme = uri_breakdown.scheme

            # auto-detect for file based ipc
            if not provider_scheme:
                if os.path.exists(provider_uri):
                    # file is available - assume ipc/file scheme
                    provider_scheme = 'file'
                    self.log.info(
                        f"Auto-detected provider scheme as 'file://' for provider {provider_uri}"
                    )

            try:
                self._provider = providers[provider_scheme](provider_uri)
            except KeyError:
                raise self.UnsupportedProvider(
                    f"{provider_uri} is an invalid or unsupported blockchain provider URI"
                )
            else:
                self.provider_uri = provider_uri or NO_BLOCKCHAIN_CONNECTION
        else:
            self._provider = provider

    def __transaction_failed(
        self, exception: Exception, transaction_dict: dict,
        contract_function: Union[ContractFunction,
                                 ContractConstructor]) -> None:
        """
        Re-raising error handler and context manager for transaction broadcast or
        build failure events at the interface layer. This method is a last line of defense
        against unhandled exceptions caused by transaction failures and must raise an exception.
        # TODO: #1504 - Additional Handling of validation failures (gas limits, invalid fields, etc.)
        """

        try:
            # Assume this error is formatted as an IPC response
            # (a single dict arg whose values are (code, message)).
            code, message = exception.args[0].values()

        except (ValueError, IndexError, AttributeError):
            # TODO: #1504 - Try even harder to determine if this is insufficient funds causing the issue,
            #               This may be best handled at the agent or actor layer for registry and token interactions.
            # Worst case scenario - raise the exception held in context implicitly
            raise exception

        else:
            if int(code) != self.TransactionFailed.IPC_CODE:
                # Only handle client-specific exceptions
                # https://www.jsonrpc.org/specification Section 5.1
                raise exception
            self.log.critical(message)  # simple context
            raise self.TransactionFailed(
                message=message,  # rich error (best case)
                contract_function=contract_function,
                transaction_dict=transaction_dict)

    def __log_transaction(self, transaction_dict: dict,
                          contract_function: ContractFunction):
        """
        Format and log a transaction dict and return the transaction name string.
        This method *must not* mutate the original transaction dict.
        """
        # Do not mutate the original transaction dict
        # NOTE(review): dict(...) already copies; the extra .copy() is redundant.
        tx = dict(transaction_dict).copy()

        # Format
        if tx.get('to'):
            tx['to'] = to_checksum_address(contract_function.address)
        try:
            tx['selector'] = contract_function.selector
        except AttributeError:
            # ContractConstructor has no selector - skip it.
            pass
        tx['from'] = to_checksum_address(tx['from'])
        tx.update({
            f: prettify_eth_amount(v)
            for f, v in tx.items() if f in ('gasPrice', 'value')
        })
        payload_pprint = ', '.join("{}: {}".format(k, v)
                                   for k, v in tx.items())

        # Log
        transaction_name = get_transaction_name(
            contract_function=contract_function)
        self.log.debug(f"[TX-{transaction_name}] | {payload_pprint}")

    @validate_checksum_address
    def build_transaction(
        self,
        contract_function: ContractFunction,
        sender_address: str,
        payload: dict = None,
        transaction_gas_limit: int = None,
    ) -> dict:
        """
        Assemble an unsigned transaction dict for ``contract_function``:
        merge chain id, pending nonce, sender, and the strategy gas price
        into ``payload``, then run web3 gas estimation (skipped when an
        explicit ``transaction_gas_limit`` is given).  Estimation failures
        are funneled through the TransactionFailed handler.
        """
        #
        # Build Payload
        #

        base_payload = {
            'chainId':
            int(self.client.chain_id),
            'nonce':
            self.client.w3.eth.getTransactionCount(sender_address, 'pending'),
            'from':
            sender_address,
            'gasPrice':
            self.client.gas_price
        }

        # Aggregate
        if not payload:
            payload = {}
        payload.update(base_payload)
        # Explicit gas override - will skip gas estimation in next operation.
        if transaction_gas_limit:
            payload['gas'] = int(transaction_gas_limit)

        #
        # Build Transaction
        #

        self.__log_transaction(transaction_dict=payload,
                               contract_function=contract_function)
        try:
            transaction_dict = contract_function.buildTransaction(
                payload)  # Gas estimation occurs here
        except (TestTransactionFailed, ValidationError, ValueError) as error:
            # Note: Geth raises ValueError in the same condition that pyevm raises ValidationError here.
            # Treat this condition as "Transaction Failed" during gas estimation.
            # NOTE(review): __transaction_failed always raises, so this
            # outer `raise` never actually re-raises its (None) return.
            raise self.__transaction_failed(
                exception=error,
                transaction_dict=payload,
                contract_function=contract_function)
        return transaction_dict

    def sign_and_broadcast_transaction(self,
                                       transaction_dict,
                                       transaction_name: str = "",
                                       confirmations: int = 0) -> dict:
        """
        Sign ``transaction_dict`` with the active transacting power,
        broadcast it, wait (up to TIMEOUT) for a receipt with the requested
        number of confirmations, and sanity-check the receipt status.
        Returns the receipt dict; raises InterfaceError on failure.
        """
        #
        # Setup
        #

        # TODO # 1754 - Move this to singleton - I do not approve... nor does Bogdan?
        if GlobalLoggerSettings._json_ipc:
            emitter = JSONRPCStdoutEmitter()
        else:
            emitter = StdoutEmitter()

        if self.transacting_power is READ_ONLY_INTERFACE:
            raise self.InterfaceError(str(READ_ONLY_INTERFACE))

        #
        # Sign
        #

        # TODO: Show the USD Price:  https://api.coinmarketcap.com/v1/ticker/ethereum/
        price = transaction_dict['gasPrice']
        cost_wei = price * transaction_dict['gas']
        cost = Web3.fromWei(cost_wei, 'gwei')
        if self.transacting_power.is_device:
            # Hardware wallets require on-device confirmation.
            emitter.message(
                f'Confirm transaction {transaction_name} on hardware wallet... ({cost} gwei @ {price})',
                color='yellow')
        signed_raw_transaction = self.transacting_power.sign_transaction(
            transaction_dict)

        #
        # Broadcast
        #

        emitter.message(
            f'Broadcasting {transaction_name} Transaction ({cost} gwei @ {price})...',
            color='yellow')
        try:
            txhash = self.client.send_raw_transaction(
                signed_raw_transaction)  # <--- BROADCAST
        except (TestTransactionFailed, ValueError) as error:
            raise  # TODO: Unify with Transaction failed handling

        #
        # Receipt
        #

        try:  # TODO: Handle block confirmation exceptions
            receipt = self.client.wait_for_receipt(txhash,
                                                   timeout=self.TIMEOUT,
                                                   confirmations=confirmations)
        except TimeExhausted:
            # TODO: #1504 - Handle transaction timeout
            raise
        else:
            self.log.debug(
                f"[RECEIPT-{transaction_name}] | txhash: {receipt['transactionHash'].hex()}"
            )

        #
        # Confirmations
        #

        # Primary check
        transaction_status = receipt.get('status', UNKNOWN_TX_STATUS)
        if transaction_status == 0:
            failure = f"Transaction transmitted, but receipt returned status code 0. " \
                      f"Full receipt: \n {pprint.pformat(receipt, indent=2)}"
            raise self.InterfaceError(failure)

        if transaction_status is UNKNOWN_TX_STATUS:
            self.log.info(
                f"Unknown transaction status for {txhash} (receipt did not contain a status field)"
            )

            # Secondary check
            # Exhausting exactly the supplied gas is a strong hint the tx reverted.
            tx = self.client.get_transaction(txhash)
            if tx["gas"] == receipt["gasUsed"]:
                raise self.InterfaceError(
                    f"Transaction consumed 100% of transaction gas."
                    f"Full receipt: \n {pprint.pformat(receipt, indent=2)}")

        return receipt

    def get_blocktime(self):
        """Return the current blocktime as reported by the attached client."""
        return self.client.get_blocktime()

    @validate_checksum_address
    def send_transaction(self,
                         contract_function: Union[ContractFunction,
                                                  ContractConstructor],
                         sender_address: str,
                         payload: dict = None,
                         transaction_gas_limit: int = None,
                         confirmations: int = 0) -> dict:
        """
        Convenience wrapper: build the transaction for
        ``contract_function`` then sign, broadcast, and wait for its
        receipt.  Returns the receipt dict.
        """
        transaction = self.build_transaction(
            contract_function=contract_function,
            sender_address=sender_address,
            payload=payload,
            transaction_gas_limit=transaction_gas_limit)

        # Get transaction name
        try:
            transaction_name = contract_function.fn_name.upper()
        except AttributeError:
            # Constructors have no fn_name; anything else is unidentified.
            transaction_name = 'DEPLOY' if isinstance(
                contract_function, ContractConstructor) else 'UNKNOWN'

        receipt = self.sign_and_broadcast_transaction(
            transaction_dict=transaction,
            transaction_name=transaction_name,
            confirmations=confirmations)
        return receipt

    def get_contract_by_name(
            self,
            registry: BaseContractRegistry,
            contract_name: str,
            contract_version: str = None,
            enrollment_version: Union[int, str] = None,
            proxy_name: str = None,
            use_proxy_address: bool = True) -> VersionedContract:
        """
        Instantiate a deployed contract from registry data,
        and assimilate it with its proxy if it is upgradeable.
        """
        target_contract_records = registry.search(
            contract_name=contract_name, contract_version=contract_version)

        if not target_contract_records:
            raise self.UnknownContract(
                f"No such contract records with name {contract_name}:{contract_version}."
            )

        if proxy_name:

            # Lookup proxies; Search for a published proxy that targets this contract record
            proxy_records = registry.search(contract_name=proxy_name)

            results = list()
            for proxy_name, proxy_version, proxy_address, proxy_abi in proxy_records:
                proxy_contract = self.client.w3.eth.contract(
                    abi=proxy_abi,
                    address=proxy_address,
                    version=proxy_version,
                    ContractFactoryClass=self._contract_factory)

                # Read this dispatcher's target address from the blockchain
                proxy_live_target_address = proxy_contract.functions.target(
                ).call()
                for target_name, target_version, target_address, target_abi in target_contract_records:

                    if target_address == proxy_live_target_address:
                        # use_proxy_address selects whether callers interact
                        # through the proxy or directly with the target.
                        if use_proxy_address:
                            triplet = (proxy_address, target_version,
                                       target_abi)
                        else:
                            triplet = (target_address, target_version,
                                       target_abi)
                    else:
                        continue

                    results.append(triplet)

            if len(results) > 1:
                address, _version, _abi = results[0]
                message = "Multiple {} deployments are targeting {}".format(
                    proxy_name, address)
                raise self.InterfaceError(message.format(contract_name))

            else:
                try:
                    selected_address, selected_version, selected_abi = results[
                        0]
                except IndexError:
                    raise self.UnknownContract(
                        f"There are no Dispatcher records targeting '{contract_name}':{contract_version}"
                    )

        else:
            # TODO: use_proxy_address doesnt' work in this case. Should we raise if used?

            # NOTE: 0 must be allowed as a valid version number
            if len(target_contract_records) != 1:
                if enrollment_version is None:
                    m = f"{len(target_contract_records)} records enrolled " \
                        f"for contract {contract_name}:{contract_version} " \
                        f"and no version index was supplied."
                    raise self.InterfaceError(m)
                enrollment_version = self.__get_enrollment_version_index(
                    name=contract_name,
                    contract_version=contract_version,
                    version_index=enrollment_version,
                    enrollments=len(target_contract_records))

            else:
                enrollment_version = -1  # default

            _contract_name, selected_version, selected_address, selected_abi = target_contract_records[
                enrollment_version]

        # Create the contract from selected sources
        unified_contract = self.client.w3.eth.contract(
            abi=selected_abi,
            address=selected_address,
            version=selected_version,
            ContractFactoryClass=self._contract_factory)

        return unified_contract

    @staticmethod
    def __get_enrollment_version_index(version_index: Union[int, str],
                                       enrollments: int, name: str,
                                       contract_version: str):
        """
        Resolve a user-supplied enrollment version index to a list index:
        'latest' -> -1, 'earliest' -> 0, or any integer within the number
        of enrollments.  Raises ValueError for unparseable or out-of-range
        values (``name``/``contract_version`` are used in error text only).
        """
        version_names = {'latest': -1, 'earliest': 0}
        try:
            version = version_names[version_index]
        except KeyError:
            try:
                version = int(version_index)
            except ValueError:
                what_is_this = version_index
                raise ValueError(
                    f"'{what_is_this}' is not a valid enrollment version number"
                )
            else:
                # Negative indices are accepted here (Python list semantics).
                if version > enrollments - 1:
                    message = f"Version index '{version}' is larger than the number of enrollments " \
                              f"for {name}:{contract_version}."
                    raise ValueError(message)
        return version
# Beispiel #24
# 0
class Learner:
    """
    Any participant in the "learning loop" - a class inheriting from
    this one has the ability, synchronously or asynchronously,
    to learn about nodes in the network, verify some essential
    details about them, and store information about them for later use.
    """

    _SHORT_LEARNING_DELAY = 5   # seconds between rounds while actively discovering
    _LONG_LEARNING_DELAY = 90   # slowed-down interval once no new nodes appear
    LEARNING_TIMEOUT = 10       # default seconds to block waiting for specific nodes
    _ROUNDS_WITHOUT_NODES_AFTER_WHICH_TO_SLOW_DOWN = 10

    # For Keeps
    __DEFAULT_NODE_STORAGE = ForgetfulNodeStorage
    __DEFAULT_MIDDLEWARE_CLASS = RestMiddleware

    LEARNER_VERSION = LEARNING_LOOP_VERSION
    node_splitter = BytestringSplitter(VariableLengthBytestring)
    version_splitter = BytestringSplitter((int, 2, {"byteorder": "big"}))
    tracker_class = FleetStateTracker

    invalid_metadata_message = "{} has invalid metadata.  Maybe its stake is over?  Or maybe it is transitioning to a new interface.  Ignoring."
    unknown_version_message = "{} purported to be of version {}, but we're only version {}.  Is there a new version of NuCypher?"
    really_unknown_version_message = "Unable to glean address from node that perhaps purported to be version {}.  We're only version {}."
    fleet_state_icon = ""

    class NotEnoughNodes(RuntimeError):
        pass

    class NotEnoughTeachers(NotEnoughNodes):
        pass

    class UnresponsiveTeacher(ConnectionError):
        pass

    class NotATeacher(ValueError):
        """
        Raised when a character cannot be properly utilized because
        it does not have the proper attributes for learning or verification.
        """

    def __init__(
        self,
        domains: Set,
        network_middleware: RestMiddleware = None,
        start_learning_now: bool = False,
        learn_on_same_thread: bool = False,
        known_nodes: tuple = None,
        seed_nodes: Tuple[tuple] = None,
        node_storage=None,
        save_metadata: bool = False,
        abort_on_learning_error: bool = False,
        lonely: bool = False,
    ) -> None:
        """
        :param domains: the learning domains this node participates in.
        :param network_middleware: REST middleware for node-to-node requests;
            a fresh default instance is created per Learner when omitted.
        :param start_learning_now: start the learning loop immediately.
        :param learn_on_same_thread: when starting now, learn synchronously.
        :param known_nodes: nodes to remember at construction time.
        :param seed_nodes: hardcoded seednode metadata to bootstrap from.
        :param node_storage: persistence backend for node metadata/certs.
        :param save_metadata: persist node metadata as nodes are learned.
        :param abort_on_learning_error: crash (gracefully) on learning errors.
        :param lonely: skip seednode contact entirely.
        """
        self.log = Logger("learning-loop")  # type: Logger

        self.learning_domains = domains
        # Fix: the previous default argument was a RestMiddleware instance
        # created once at class-definition time and therefore shared by every
        # Learner; build one per instance instead.
        self.network_middleware = network_middleware or self.__DEFAULT_MIDDLEWARE_CLASS()
        self.save_metadata = save_metadata
        self.start_learning_now = start_learning_now
        self.learn_on_same_thread = learn_on_same_thread

        self._abort_on_learning_error = abort_on_learning_error
        self._learning_listeners = defaultdict(list)
        self._node_ids_to_learn_about_immediately = set()

        self.__known_nodes = self.tracker_class()

        self.lonely = lonely
        self.done_seeding = False
        # Fix: block_until_specific_nodes_are_known reads self._crashed, but it
        # was previously only assigned inside _crash_gracefully, raising
        # AttributeError on any healthy instance.  Initialize it here.
        self._crashed = False

        # Read
        if node_storage is None:
            node_storage = self.__DEFAULT_NODE_STORAGE(
                federated_only=self.federated_only,
                # TODO: remove federated_only
                character_class=self.__class__)

        self.node_storage = node_storage
        if save_metadata and node_storage is NO_STORAGE_AVAILIBLE:
            raise ValueError(
                "Cannot save nodes without a configured node storage")

        known_nodes = known_nodes or tuple()
        self.unresponsive_startup_nodes = list(
        )  # TODO: Attempt to use these again later
        for node in known_nodes:
            try:
                self.remember_node(
                    node
                )  # TODO: Need to test this better - do we ever init an Ursula-Learner with Node Storage?
            except self.UnresponsiveTeacher:
                self.unresponsive_startup_nodes.append(node)

        self.teacher_nodes = deque()
        self._current_teacher_node = None  # type: Teacher
        self._learning_task = task.LoopingCall(self.keep_learning_about_nodes)
        self._learning_round = 0  # type: int
        self._rounds_without_new_nodes = 0  # type: int
        self._seed_nodes = seed_nodes or []
        self.unresponsive_seed_nodes = set()

        if self.start_learning_now:
            self.start_learning_loop(now=self.learn_on_same_thread)

    @property
    def known_nodes(self):
        """The fleet-state tracker holding every node this Learner knows."""
        return self.__known_nodes

    def load_seednodes(self,
                       read_storages: bool = True,
                       retry_attempts: int = 3):  # TODO: why are these unused?
        """
        Engage known nodes from storages and pre-fetch hardcoded seednode certificates for node learning.
        """
        if self.done_seeding:
            self.log.debug("Already done seeding; won't try again.")
            return

        # Imported here to avoid a circular import at module load time.
        from nucypher.characters.lawful import Ursula
        for seednode_metadata in self._seed_nodes:
            self.log.debug("Seeding from: {}|{}:{}".format(
                seednode_metadata.checksum_public_address,
                seednode_metadata.rest_host, seednode_metadata.rest_port))

            seed_node = Ursula.from_seednode_metadata(
                seednode_metadata=seednode_metadata,
                network_middleware=self.network_middleware,
                federated_only=self.federated_only)  # TODO: 466
            # from_seednode_metadata signals an unreachable seednode with False.
            if seed_node is False:
                self.unresponsive_seed_nodes.add(seednode_metadata)
            else:
                self.unresponsive_seed_nodes.discard(seednode_metadata)
                self.remember_node(seed_node)

        if not self.unresponsive_seed_nodes:
            self.log.info("Finished learning about all seednodes.")

        self.done_seeding = True

        if read_storages is True:
            self.read_nodes_from_storage()

        if not self.known_nodes:
            self.log.warn(
                "No seednodes were available after {} attempts".format(
                    retry_attempts))
            # TODO: Need some actual logic here for situation with no seed nodes (ie, maybe try again much later)

    def read_nodes_from_storage(self) -> set:
        """Remember every node found in the configured node storage.

        Fix: this method is annotated ``-> set`` but previously returned None;
        it now returns the set of node objects that were read.
        """
        stored_nodes = self.node_storage.all(
            federated_only=self.federated_only)  # TODO: 466
        nodes_read = set()
        for node in stored_nodes:
            self.remember_node(node)
            nodes_read.add(node)
        return nodes_read

    def remember_node(self,
                      node,
                      force_verification_check=False,
                      record_fleet_state=True):
        """Verify and store a node, returning it on success.

        Returns False when the node is self, already known (and not newer),
        or fails TLS/availability verification.
        :raises NotATeacher: if the object has no certificate attribute.
        """
        if node == self:  # No need to remember self.
            return False

        # First, determine if this is an outdated representation of an already known node.
        with suppress(KeyError):
            already_known_node = self.known_nodes[node.checksum_public_address]
            if not node.timestamp > already_known_node.timestamp:
                self.log.debug("Skipping already known node {}".format(
                    already_known_node))
                # This node is already known.  We can safely return.
                return False

        try:
            stranger_certificate = node.certificate
        except AttributeError:
            # Whoops, we got an Alice, Bob, or someone...
            raise self.NotATeacher(
                f"{node.__class__.__name__} does not have a certificate and cannot be remembered."
            )

        # Store node's certificate - It has been seen.
        certificate_filepath = self.node_storage.store_node_certificate(
            certificate=stranger_certificate)

        # In some cases (seed nodes or other temp stored certs),
        # this will update the filepath from the temp location to this one.
        node.certificate_filepath = certificate_filepath
        self.log.info(
            f"Saved TLS certificate for {node.nickname}: {certificate_filepath}"
        )

        try:
            node.verify_node(
                force=force_verification_check,
                network_middleware=self.network_middleware,
                accept_federated_only=self.federated_only,
                # TODO: 466 - move federated-only up to Learner?
            )
        except SSLError:
            return False  # TODO: Bucket this node as having bad TLS info - maybe it's an update that hasn't fully propagated?

        except NodeSeemsToBeDown:
            return False  # TODO: Bucket this node as "ghost" or something: somebody else knows about it, but we can't get to it.

        listeners = self._learning_listeners.pop(node.checksum_public_address,
                                                 tuple())
        address = node.checksum_public_address

        self.known_nodes[address] = node

        if self.save_metadata:
            self.node_storage.store_node_metadata(node=node)
        # Notify any queues waiting specifically for this address.
        for listener in listeners:
            listener.add(address)
        self._node_ids_to_learn_about_immediately.discard(address)

        if record_fleet_state:
            self.known_nodes.record_fleet_state()

        return node

    def start_learning_loop(self, now=False):
        """Start the looping learning task; returns False if already running.

        With now=True, seeding and the first round happen synchronously on
        this thread; otherwise seeding is deferred to a thread pool.
        """
        if self._learning_task.running:
            return False
        elif now:
            self.log.info("Starting Learning Loop NOW.")

            if self.lonely:
                self.done_seeding = True
                self.read_nodes_from_storage()

            else:
                self.load_seednodes()

            self.learn_from_teacher_node()
            self.learning_deferred = self._learning_task.start(
                interval=self._SHORT_LEARNING_DELAY)
            self.learning_deferred.addErrback(self.handle_learning_errors)
            return self.learning_deferred
        else:
            self.log.info("Starting Learning Loop.")

            learning_deferreds = list()
            if not self.lonely:
                # Seed off the main thread so startup isn't blocked by I/O.
                seeder_deferred = deferToThread(self.load_seednodes)
                seeder_deferred.addErrback(self.handle_learning_errors)
                learning_deferreds.append(seeder_deferred)

            learner_deferred = self._learning_task.start(
                interval=self._SHORT_LEARNING_DELAY, now=now)
            learner_deferred.addErrback(self.handle_learning_errors)
            learning_deferreds.append(learner_deferred)

            self.learning_deferred = defer.DeferredList(learning_deferreds)
            return self.learning_deferred

    def stop_learning_loop(self, reason=None):
        """
        Only for tests at this point.  Maybe some day for graceful shutdowns.
        """
        self._learning_task.stop()

    def handle_learning_errors(self, *args, **kwargs):
        """Errback for the learning loop: crash or log-and-restart."""
        failure = args[0]
        if self._abort_on_learning_error:
            self.log.critical(
                "Unhandled error during node learning.  Attempting graceful crash."
            )
            # Hop back to the reactor thread before raising.
            reactor.callFromThread(self._crash_gracefully, failure=failure)
        else:
            self.log.warn("Unhandled error during node learning: {}".format(
                failure.getTraceback()))
            if not self._learning_task.running:
                self.start_learning_loop(
                )  # TODO: Consider a single entry point for this with more elegant pause and unpause.

    def _crash_gracefully(self, failure=None):
        """
        A facility for crashing more gracefully in the event that an exception
        is unhandled in a different thread, especially inside a loop like the learning loop.
        """
        self._crashed = failure
        failure.raiseException()
        # TODO: We don't actually have checksum_public_address at this level - maybe only Characters can crash gracefully :-)
        # NOTE(review): raiseException() above never returns, so this log line
        # is unreachable; left in place pending the TODO above.
        self.log.critical("{} crashed with {}".format(
            self.checksum_public_address, failure))

    def select_teacher_nodes(self):
        """Refill the teacher queue with a shuffled copy of the known nodes."""
        nodes_we_know_about = self.known_nodes.shuffled()

        if not nodes_we_know_about:
            raise self.NotEnoughTeachers(
                "Need some nodes to start learning from.")

        self.teacher_nodes.extend(nodes_we_know_about)

    def cycle_teacher_node(self):
        """Advance to the next teacher, retrying unresponsive seednodes first."""
        # To ensure that all the best teachers are available, first let's make sure
        # that we have connected to all the seed nodes.
        if self.unresponsive_seed_nodes and not self.lonely:
            self.log.info(
                "Still have unresponsive seed nodes; trying again to connect.")
            self.load_seednodes()  # Ideally, this is async and singular.

        if not self.teacher_nodes:
            self.select_teacher_nodes()
        try:
            self._current_teacher_node = self.teacher_nodes.pop()
        except IndexError:
            error = "Not enough nodes to select a good teacher, Check your network connection then node configuration"
            raise self.NotEnoughTeachers(error)
        self.log.info("Cycled teachers; New teacher is {}".format(
            self._current_teacher_node))

    def current_teacher_node(self, cycle=False):
        """Return the current teacher, cycling first if requested or unset."""
        if cycle:
            self.cycle_teacher_node()

        if not self._current_teacher_node:
            self.cycle_teacher_node()

        teacher = self._current_teacher_node

        return teacher

    def learn_about_nodes_now(self, force=False):
        """Trigger an immediate learning round (reset or force-start the loop)."""
        if self._learning_task.running:
            self._learning_task.reset()
            self._learning_task()
        elif not force:
            self.log.warn(
                "Learning loop isn't started; can't learn about nodes now.  You can override this with force=True."
            )
        elif force:
            self.log.info("Learning loop wasn't started; forcing start now.")
            self._learning_task.start(self._SHORT_LEARNING_DELAY, now=True)

    def keep_learning_about_nodes(self):
        """
        Continually learn about new nodes.
        """
        # TODO: Allow the user to set eagerness?
        self.learn_from_teacher_node(eager=False)

    def learn_about_specific_nodes(self, addresses: Set):
        """Queue specific addresses for immediate learning and trigger a round."""
        self._node_ids_to_learn_about_immediately.update(addresses)  # hmmmm
        self.learn_about_nodes_now()

    # TODO: Dehydrate these next two methods.

    def block_until_number_of_known_nodes_is(
            self,
            number_of_nodes_to_know: int,
            timeout: int = 10,
            learn_on_this_thread: bool = False):
        """Busy-wait (0.1s polls) until enough nodes are known or timeout.

        :raises RuntimeError: if the learning loop is not running at timeout.
        :raises NotEnoughNodes: if the target count was not reached in time.
        """
        start = maya.now()
        starting_round = self._learning_round

        while True:
            rounds_undertaken = self._learning_round - starting_round
            if len(self.__known_nodes) >= number_of_nodes_to_know:
                if rounds_undertaken:
                    self.log.info(
                        "Learned about enough nodes after {} rounds.".format(
                            rounds_undertaken))
                return True

            if not self._learning_task.running:
                self.log.warn(
                    "Blocking to learn about nodes, but learning loop isn't running."
                )
            if learn_on_this_thread:
                try:
                    self.learn_from_teacher_node(eager=True)
                except (requests.exceptions.ReadTimeout,
                        requests.exceptions.ConnectTimeout):
                    # TODO: Even this "same thread" logic can be done off the main thread.
                    self.log.warn(
                        "Teacher was unreachable.  No good way to handle this on the main thread."
                    )

            # The rest of the f*****g owl
            if (maya.now() - start).seconds > timeout:
                if not self._learning_task.running:
                    raise RuntimeError(
                        "Learning loop is not running.  Start it with start_learning()."
                    )
                else:
                    raise self.NotEnoughNodes(
                        "After {} seconds and {} rounds, didn't find {} nodes".
                        format(timeout, rounds_undertaken,
                               number_of_nodes_to_know))
            else:
                time.sleep(.1)

    def block_until_specific_nodes_are_known(self,
                                             addresses: Set,
                                             timeout=LEARNING_TIMEOUT,
                                             allow_missing=0,
                                             learn_on_this_thread=False):
        """Busy-wait until all given addresses are known (or within tolerance).

        Returns True on success, False if at most allow_missing are unknown
        at timeout, or the crash failure if the learner has crashed.
        :raises NotEnoughTeachers: on timeout beyond the allowed tolerance.
        """
        start = maya.now()
        starting_round = self._learning_round

        while True:
            if self._crashed:
                return self._crashed
            rounds_undertaken = self._learning_round - starting_round
            if addresses.issubset(self.known_nodes.addresses()):
                if rounds_undertaken:
                    self.log.info(
                        "Learned about all nodes after {} rounds.".format(
                            rounds_undertaken))
                return True

            if not self._learning_task.running:
                self.log.warn(
                    "Blocking to learn about nodes, but learning loop isn't running."
                )
            if learn_on_this_thread:
                self.learn_from_teacher_node(eager=True)

            if (maya.now() - start).seconds > timeout:

                still_unknown = addresses.difference(
                    self.known_nodes.addresses())

                if len(still_unknown) <= allow_missing:
                    return False
                elif not self._learning_task.running:
                    raise self.NotEnoughTeachers(
                        "The learning loop is not running.  Start it with start_learning()."
                    )
                else:
                    raise self.NotEnoughTeachers(
                        "After {} seconds and {} rounds, didn't find these {} nodes: {}"
                        .format(timeout, rounds_undertaken, len(still_unknown),
                                still_unknown))
            else:
                time.sleep(.1)

    def _adjust_learning(self, node_list):
        """
        Takes a list of new nodes, adjusts learning accordingly.

        Currently, simply slows down learning loop when no new nodes have been discovered in a while.
        TODO: Do other important things - scrub, bucket, etc.
        """
        if node_list:
            self._rounds_without_new_nodes = 0
            self._learning_task.interval = self._SHORT_LEARNING_DELAY
        else:
            self._rounds_without_new_nodes += 1
            if self._rounds_without_new_nodes > self._ROUNDS_WITHOUT_NODES_AFTER_WHICH_TO_SLOW_DOWN:
                self.log.info(
                    "After {} rounds with no new nodes, it's time to slow down to {} seconds."
                    .format(
                        self._ROUNDS_WITHOUT_NODES_AFTER_WHICH_TO_SLOW_DOWN,
                        self._LONG_LEARNING_DELAY))
                self._learning_task.interval = self._LONG_LEARNING_DELAY

    def _push_certain_newly_discovered_nodes_here(self, queue_to_push,
                                                  node_addresses):
        """
        If any node_addresses are discovered, push them to queue_to_push.
        """
        for node_address in node_addresses:
            self.log.info("Adding listener for {}".format(node_address))
            self._learning_listeners[node_address].append(queue_to_push)

    def network_bootstrap(self, node_list: list) -> None:
        # NOTE(review): learn_about_nodes_now() accepts no node-address
        # positional arguments, so this legacy bootstrap path would raise
        # TypeError if ever invoked — confirm whether it is still needed.
        for node_addr, port in node_list:
            new_nodes = self.learn_about_nodes_now(node_addr, port)
            self.__known_nodes.update(new_nodes)

    def get_nodes_by_ids(self, node_ids):
        """Return the first locally-known node for the given ids.

        NOTE(review): lookups beyond local knowledge are unimplemented and
        currently raise NotImplementedError on the first unknown id.
        """
        for node_id in node_ids:
            try:
                # Scenario 1: We already know about this node.
                return self.__known_nodes[node_id]
            except KeyError:
                raise NotImplementedError
        # Scenario 2: We don't know about this node, but a nearby node does.
        # TODO: Build a concurrent pool of lookups here.

        # Scenario 3: We don't know about this node, and neither does our friend.

    def write_node_metadata(self, node, serializer=bytes) -> str:
        """Persist a node's metadata via the configured node storage."""
        return self.node_storage.store_node_metadata(node=node)

    def learn_from_teacher_node(self, eager=True):
        """
        Sends a request to node_url to find out about known nodes.

        Returns the list of newly-remembered nodes, NO_KNOWN_NODES,
        FLEET_STATES_MATCH, or None when the round could not complete.
        With eager=True each discovered node is fully verified immediately;
        otherwise only its metadata is validated.
        """
        self._learning_round += 1

        try:
            current_teacher = self.current_teacher_node()
        except self.NotEnoughTeachers as e:
            self.log.warn("Can't learn right now: {}".format(e.args[0]))
            return

        # Nodes that are themselves Teachers announce themselves to the teacher.
        if Teacher in self.__class__.__bases__:
            announce_nodes = [self]
        else:
            announce_nodes = None

        unresponsive_nodes = set()
        try:
            # TODO: Streamline path generation
            certificate_filepath = self.node_storage.generate_certificate_filepath(
                checksum_address=current_teacher.checksum_public_address)
            response = self.network_middleware.get_nodes_via_rest(
                node=current_teacher,
                nodes_i_need=self._node_ids_to_learn_about_immediately,
                announce_nodes=announce_nodes,
                fleet_checksum=self.known_nodes.checksum)
        except NodeSeemsToBeDown as e:
            unresponsive_nodes.add(current_teacher)
            self.log.info("Bad Response from teacher: {}:{}.".format(
                current_teacher, e))
            return
        finally:
            # Always rotate teachers, even after a failed round.
            self.cycle_teacher_node()

        #
        # Before we parse the response, let's handle some edge cases.
        if response.status_code == 204:
            # In this case, this node knows about no other nodes.  Hopefully we've taught it something.
            if response.content == b"":
                return NO_KNOWN_NODES
            # In the other case - where the status code is 204 but the repsonse isn't blank - we'll keep parsing.
            # It's possible that our fleet states match, and we'll check for that later.

        elif response.status_code != 200:
            self.log.info("Bad response from teacher {}: {} - {}".format(
                current_teacher, response, response.content))
            return

        try:
            signature, node_payload = signature_splitter(response.content,
                                                         return_remainder=True)
        except BytestringSplittingError as e:
            self.log.warn(e.args[0])
            return

        try:
            self.verify_from(current_teacher,
                             node_payload,
                             signature=signature)
        except current_teacher.InvalidSignature:
            # TODO: What to do if the teacher improperly signed the node payload?
            raise
        # End edge case handling.
        #

        fleet_state_checksum_bytes, fleet_state_updated_bytes, node_payload = FleetStateTracker.snapshot_splitter(
            node_payload, return_remainder=True)
        current_teacher.last_seen = maya.now()
        # TODO: This is weird - let's get a stranger FleetState going.
        checksum = fleet_state_checksum_bytes.hex()

        # TODO: This doesn't make sense - a decentralized node can still learn about a federated-only node.
        from nucypher.characters.lawful import Ursula
        if constant_or_bytes(node_payload) is FLEET_STATES_MATCH:
            # The teacher's fleet state matches ours; nothing new to parse.
            current_teacher.update_snapshot(
                checksum=checksum,
                updated=maya.MayaDT(
                    int.from_bytes(fleet_state_updated_bytes,
                                   byteorder="big")),
                number_of_known_nodes=len(self.known_nodes))
            return FLEET_STATES_MATCH

        node_list = Ursula.batch_from_bytes(
            node_payload, federated_only=self.federated_only)  # TODO: 466

        current_teacher.update_snapshot(checksum=checksum,
                                        updated=maya.MayaDT(
                                            int.from_bytes(
                                                fleet_state_updated_bytes,
                                                byteorder="big")),
                                        number_of_known_nodes=len(node_list))

        new_nodes = []
        for node in node_list:
            if GLOBAL_DOMAIN not in self.learning_domains:
                if not self.learning_domains.intersection(
                        node.serving_domains):
                    continue  # This node is not serving any of our domains.

            # First, determine if this is an outdated representation of an already known node.
            with suppress(KeyError):
                already_known_node = self.known_nodes[
                    node.checksum_public_address]
                if not node.timestamp > already_known_node.timestamp:
                    self.log.debug("Skipping already known node {}".format(
                        already_known_node))
                    # This node is already known.  We can safely continue to the next.
                    continue

            certificate_filepath = self.node_storage.store_node_certificate(
                certificate=node.certificate)

            try:
                if eager:
                    node.verify_node(
                        self.network_middleware,
                        accept_federated_only=self.federated_only,  # TODO: 466
                        certificate_filepath=certificate_filepath)
                    self.log.debug("Verified node: {}".format(
                        node.checksum_public_address))

                else:
                    node.validate_metadata(
                        accept_federated_only=self.federated_only)  # TODO: 466
            # This block is a mess of eagerness.  This can all be done better lazily.
            except NodeSeemsToBeDown as e:
                self.log.info(
                    f"Can't connect to {node} to verify it right now.")
            except node.InvalidNode:
                # TODO: Account for possibility that stamp, rather than interface, was bad.
                self.log.warn(node.invalid_metadata_message.format(node))
            except node.SuspiciousActivity:
                # Fix: the original referenced the undefined name `teacher_uri`
                # (guaranteed NameError when this branch was reached) and
                # reported the teacher's address as the bad node.
                message = "Suspicious Activity: Discovered node with bad signature: {}.  " \
                          "Propagated by: {}".format(node, current_teacher)
                self.log.warn(message)
            else:
                new = self.remember_node(node, record_fleet_state=False)
                if new:
                    new_nodes.append(node)

        self._adjust_learning(new_nodes)

        learning_round_log_message = "Learning round {}.  Teacher: {} knew about {} nodes, {} were new."
        self.log.info(
            learning_round_log_message.format(self._learning_round,
                                              current_teacher, len(node_list),
                                              len(new_nodes)), )
        if new_nodes:
            # Record the fleet state once per round, not once per node.
            self.known_nodes.record_fleet_state()
            for node in new_nodes:
                self.node_storage.store_node_certificate(
                    certificate=node.certificate)
        return new_nodes
Beispiel #25
0
class ContractAdministrator(NucypherTokenActor):
    """
    The administrator of network contracts.

    Deploys, upgrades, retargets and rolls back the contract suite, deploys
    beneficiary preallocation escrows, and records deployment receipts.
    """

    __interface_class = BlockchainDeployerInterface

    #
    # Deployer classes sorted by deployment dependency order.
    #

    standard_deployer_classes = (NucypherTokenDeployer, )

    dispatched_upgradeable_deployer_classes = (
        StakingEscrowDeployer,
        PolicyManagerDeployer,
        AdjudicatorDeployer,
    )

    upgradeable_deployer_classes = (
        *dispatched_upgradeable_deployer_classes,
        StakingInterfaceDeployer,
    )

    ownable_deployer_classes = (*dispatched_upgradeable_deployer_classes, )

    deployer_classes = (*standard_deployer_classes,
                        *upgradeable_deployer_classes)

    class UnknownContract(ValueError):
        """Raised when a contract name has no registered deployer class."""

    def __init__(self,
                 registry: BaseContractRegistry,
                 deployer_address: str = None,
                 client_password: str = None,
                 economics: TokenEconomics = None):
        """
        :param registry: Contract registry that records deployments.
        :param deployer_address: Checksum address used to send deployment transactions.
        :param client_password: Password used to unlock the deployer account.
        :param economics: Token economics parameters; defaults to StandardTokenEconomics.

        Note: super() is not called here to avoid setting the token agent.
        TODO: Review this logic ^^ "bare mode".
        """
        self.log = Logger("Deployment-Actor")

        self.deployer_address = deployer_address
        self.checksum_address = self.deployer_address
        self.economics = economics or StandardTokenEconomics()

        self.registry = registry
        self.preallocation_escrow_deployers = dict()
        # Map contract name -> deployer class for name-based lookup.
        self.deployers = {d.contract_name: d for d in self.deployer_classes}

        self.transacting_power = TransactingPower(password=client_password,
                                                  account=deployer_address)
        self.transacting_power.activate()

    def __repr__(self):
        r = '{name} - {deployer_address})'.format(
            name=self.__class__.__name__,
            deployer_address=self.deployer_address)
        return r

    def __get_deployer(self, contract_name: str):
        """Return the deployer class registered for ``contract_name`` or raise UnknownContract."""
        try:
            Deployer = self.deployers[contract_name]
        except KeyError:
            raise self.UnknownContract(contract_name)
        return Deployer

    @staticmethod
    def collect_deployment_secret(deployer) -> str:
        """Interactively prompt for one deployer's upgrade secret (input hidden, confirmed)."""
        secret = click.prompt(
            f'Enter {deployer.contract_name} Deployment Secret',
            hide_input=True,
            confirmation_prompt=True)
        return secret

    def collect_deployment_secrets(self) -> dict:
        """Prompt for an upgrade secret for every upgradeable contract.

        :return: Mapping of contract name -> plaintext secret.
        """
        secrets = dict()
        for deployer in self.upgradeable_deployer_classes:
            secrets[deployer.contract_name] = self.collect_deployment_secret(
                deployer)
        return secrets

    def deploy_contract(
        self,
        contract_name: str,
        gas_limit: int = None,
        plaintext_secret: str = None,
        bare: bool = False,
        progress=None,
        *args,
        **kwargs,
    ) -> Tuple[dict, BaseContractDeployer]:
        """Deploy a single contract by name.

        :param contract_name: Registry name of the contract to deploy.
        :param gas_limit: Optional gas limit for the deployment transactions.
        :param plaintext_secret: Upgrade secret; required for the initial
            deployment of an upgradeable contract (hashed before use).
        :param bare: If True, treat an upgradeable deployment as non-initial.
        :param progress: Optional progress bar updated by the deployer.
        :return: Tuple of (deployment receipts, deployer instance).
        :raises ValueError: If an initial upgradeable deployment lacks a secret.
        """
        Deployer = self.__get_deployer(contract_name=contract_name)
        deployer = Deployer(registry=self.registry,
                            deployer_address=self.deployer_address,
                            economics=self.economics,
                            *args,
                            **kwargs)

        if Deployer._upgradeable:
            is_initial_deployment = not bare
            if is_initial_deployment and not plaintext_secret:
                raise ValueError(
                    "An upgrade secret must be passed to perform initial deployment of a Dispatcher."
                )
            secret_hash = None
            if plaintext_secret:
                # Only the keccak hash of the secret goes on-chain.
                secret_hash = keccak(bytes(plaintext_secret, encoding='utf-8'))
            receipts = deployer.deploy(
                secret_hash=secret_hash,
                gas_limit=gas_limit,
                initial_deployment=is_initial_deployment,
                progress=progress)
        else:
            receipts = deployer.deploy(gas_limit=gas_limit, progress=progress)
        return receipts, deployer

    def upgrade_contract(self, contract_name: str,
                         existing_plaintext_secret: str,
                         new_plaintext_secret: str) -> dict:
        """Upgrade a deployed upgradeable contract, rotating its secret."""
        Deployer = self.__get_deployer(contract_name=contract_name)
        deployer = Deployer(registry=self.registry,
                            deployer_address=self.deployer_address)
        new_secret_hash = keccak(bytes(new_plaintext_secret, encoding='utf-8'))
        receipts = deployer.upgrade(existing_secret_plaintext=bytes(
            existing_plaintext_secret, encoding='utf-8'),
                                    new_secret_hash=new_secret_hash)
        return receipts

    def retarget_proxy(self, contract_name: str, target_address: str,
                       existing_plaintext_secret: str,
                       new_plaintext_secret: str):
        """Point a contract's proxy at ``target_address``, rotating its secret."""
        Deployer = self.__get_deployer(contract_name=contract_name)
        deployer = Deployer(registry=self.registry,
                            deployer_address=self.deployer_address)
        new_secret_hash = keccak(bytes(new_plaintext_secret, encoding='utf-8'))
        receipts = deployer.retarget(target_address=target_address,
                                     existing_secret_plaintext=bytes(
                                         existing_plaintext_secret,
                                         encoding='utf-8'),
                                     new_secret_hash=new_secret_hash)
        return receipts

    def rollback_contract(self, contract_name: str,
                          existing_plaintext_secret: str,
                          new_plaintext_secret: str):
        """Roll a contract back to its previous target, rotating its secret."""
        Deployer = self.__get_deployer(contract_name=contract_name)
        deployer = Deployer(registry=self.registry,
                            deployer_address=self.deployer_address)
        new_secret_hash = keccak(bytes(new_plaintext_secret, encoding='utf-8'))
        receipts = deployer.rollback(existing_secret_plaintext=bytes(
            existing_plaintext_secret, encoding='utf-8'),
                                     new_secret_hash=new_secret_hash)
        return receipts

    def deploy_preallocation_escrow(
            self,
            allocation_registry: AllocationRegistry,
            progress=None) -> PreallocationEscrowDeployer:
        """Deploy one PreallocationEscrow contract and cache its deployer by address."""
        preallocation_escrow_deployer = PreallocationEscrowDeployer(
            registry=self.registry,
            deployer_address=self.deployer_address,
            allocation_registry=allocation_registry)
        preallocation_escrow_deployer.deploy(progress=progress)
        principal_address = preallocation_escrow_deployer.contract.address
        self.preallocation_escrow_deployers[
            principal_address] = preallocation_escrow_deployer
        return preallocation_escrow_deployer

    def deploy_network_contracts(self,
                                 secrets: dict,
                                 interactive: bool = True,
                                 emitter: StdoutEmitter = None,
                                 etherscan: bool = False) -> dict:
        """

        :param secrets: Contract upgrade secrets dictionary
        :param interactive: If True, wait for keypress after each contract deployment
        :param emitter: A console output emitter instance. If emitter is None, no output will be echoed to the console.
        :param etherscan: Open deployed contracts in Etherscan
        :return: Returns a dictionary of deployment receipts keyed by contract name
        """

        if interactive and not emitter:
            raise ValueError(
                "'emitter' is a required keyword argument when interactive is True."
            )

        deployment_receipts = dict()
        gas_limit = None  # TODO: Gas management

        # Size the progress bar: one tick per deployment transaction.
        total_deployment_transactions = 0
        for deployer_class in self.deployer_classes:
            total_deployment_transactions += len(
                deployer_class.deployment_steps)

        first_iteration = True
        with click.progressbar(length=total_deployment_transactions,
                               label="Deployment progress",
                               show_eta=False) as bar:
            bar.short_limit = 0
            for deployer_class in self.deployer_classes:
                if interactive and not first_iteration:
                    click.pause(
                        info=
                        f"\nPress any key to continue with deployment of {deployer_class.contract_name}"
                    )

                if emitter:
                    emitter.echo(
                        f"\nDeploying {deployer_class.contract_name} ...")
                    # Force a progress-bar redraw after the echo
                    # (reaches into click internals).
                    bar._last_line = None
                    bar.render_progress()

                if deployer_class in self.standard_deployer_classes:
                    receipts, deployer = self.deploy_contract(
                        contract_name=deployer_class.contract_name,
                        gas_limit=gas_limit,
                        progress=bar)
                else:
                    # Upgradeable contracts need their upgrade secret.
                    receipts, deployer = self.deploy_contract(
                        contract_name=deployer_class.contract_name,
                        plaintext_secret=secrets[deployer_class.contract_name],
                        gas_limit=gas_limit,
                        progress=bar)

                if emitter:
                    blockchain = BlockchainInterfaceFactory.get_interface()
                    paint_contract_deployment(
                        contract_name=deployer_class.contract_name,
                        receipts=receipts,
                        contract_address=deployer.contract_address,
                        emitter=emitter,
                        chain_name=blockchain.client.chain_name,
                        open_in_browser=etherscan)

                deployment_receipts[deployer_class.contract_name] = receipts
                first_iteration = False

        return deployment_receipts

    def relinquish_ownership(self,
                             new_owner: str,
                             emitter: StdoutEmitter = None,
                             interactive: bool = True,
                             transaction_gas_limit: int = None) -> dict:
        """Transfer ownership of every ownable contract to ``new_owner``.

        :param new_owner: EIP-55 checksum address of the new owner.
        :param emitter: Optional console emitter for progress messages.
        :param interactive: If True, pause for a keypress between transfers.
        :param transaction_gas_limit: Optional gas limit for each transfer.
        :return: Transfer receipts keyed by contract name.
        :raises ValueError: If ``new_owner`` is not a valid checksum address.
        """
        if not is_checksum_address(new_owner):
            raise ValueError(
                f"{new_owner} is an invalid EIP-55 checksum address.")

        receipts = dict()

        for contract_deployer in self.ownable_deployer_classes:
            deployer = contract_deployer(
                registry=self.registry, deployer_address=self.deployer_address)
            # Fix: keep the transfer receipt; previously the return value was
            # discarded and the receipts dict was stored into itself.
            receipt = deployer.transfer_ownership(
                new_owner=new_owner,
                transaction_gas_limit=transaction_gas_limit)

            if emitter:
                emitter.echo(
                    f"Transferred ownership of {deployer.contract_name} to {new_owner}"
                )

            if interactive:
                click.pause(info="Press any key to continue")

            receipts[contract_deployer.contract_name] = receipt

        return receipts

    def deploy_beneficiary_contracts(
        self,
        allocations: List[Dict[str, Union[str, int]]],
        allocation_outfile: str = None,
        allocation_registry: AllocationRegistry = None,
        crash_on_failure: bool = True,
        interactive: bool = True,
        emitter: StdoutEmitter = None,
    ) -> Dict[str, dict]:
        """
        The allocation file is a JSON file containing a list of allocations. Each allocation has a:
          * 'beneficiary_address': Checksum address of the beneficiary
          * 'name': User-friendly name of the beneficiary (Optional)
          * 'amount': Amount of tokens locked, in NuNits
          * 'duration_seconds': Lock duration expressed in seconds

        Example allocation file:

        [ {'beneficiary_address': '0xdeadbeef', 'name': 'H. E. Pennypacker', 'amount': 100, 'duration_seconds': 31536000},
          {'beneficiary_address': '0xabced120', 'amount': 133432, 'duration_seconds': 31536000},
          {'beneficiary_address': '0xf7aefec2', 'amount': 999, 'duration_seconds': 31536000}]

        """

        if interactive and not emitter:
            raise ValueError(
                "'emitter' is a required keyword argument when interactive is True."
            )

        if allocation_registry and allocation_outfile:
            raise self.ActorError(
                "Pass either allocation registry or allocation_outfile, not both."
            )
        if allocation_registry is None:
            allocation_registry = AllocationRegistry(
                filepath=allocation_outfile)

        if emitter:
            paint_input_allocation_file(emitter, allocations)

        if interactive:
            click.confirm("Continue with the allocation process?", abort=True)

        # Fail fast if the deployer cannot fund every allocation.
        total_to_allocate = NU.from_nunits(
            sum(allocation['amount'] for allocation in allocations))
        balance = ContractAgency.get_agent(NucypherTokenAgent,
                                           self.registry).get_balance(
                                               self.deployer_address)
        if balance < total_to_allocate:
            raise ValueError(
                f"Not enough tokens to allocate. We need at least {total_to_allocate}."
            )

        allocation_receipts, failed, allocated = dict(), list(), list()
        # Each allocation performs four transactions.
        total_deployment_transactions = len(allocations) * 4

        # Create an allocation template file, containing the allocation contract ABI and placeholder values
        # for the beneficiary and contract addresses. This file will be shared with all allocation users.
        empty_allocation_escrow_deployer = PreallocationEscrowDeployer(
            registry=self.registry)
        allocation_contract_abi = empty_allocation_escrow_deployer.get_contract_abi(
        )
        allocation_template = {
            "BENEFICIARY_ADDRESS":
            ["ALLOCATION_CONTRACT_ADDRESS", allocation_contract_abi]
        }

        parent_path = Path(allocation_registry.filepath
                           ).parent  # Use same folder as allocation registry
        template_filename = IndividualAllocationRegistry.REGISTRY_NAME
        template_filepath = os.path.join(parent_path, template_filename)
        AllocationRegistry(filepath=template_filepath).write(
            registry_data=allocation_template)
        if emitter:
            emitter.echo(
                f"Saved allocation template file to {template_filepath}",
                color='blue',
                bold=True)

        # Deploy each allocation contract
        with click.progressbar(length=total_deployment_transactions,
                               label="Allocation progress",
                               show_eta=False) as bar:
            bar.short_limit = 0
            for allocation in allocations:

                # TODO: Check if allocation already exists in allocation registry

                beneficiary = allocation['beneficiary_address']
                name = allocation.get('name', 'No name provided')

                if interactive:
                    click.pause(
                        info=f"\nPress any key to continue with allocation for "
                        f"beneficiary {beneficiary} ({name})")

                if emitter:
                    emitter.echo(
                        f"\nDeploying PreallocationEscrow contract for beneficiary {beneficiary} ({name})..."
                    )
                    # Force a progress-bar redraw after the echo
                    # (reaches into click internals).
                    bar._last_line = None
                    bar.render_progress()

                deployer = self.deploy_preallocation_escrow(
                    allocation_registry=allocation_registry, progress=bar)

                amount = allocation['amount']
                duration = allocation['duration_seconds']
                try:
                    receipts = deployer.deliver(
                        value=amount,
                        duration=duration,
                        beneficiary_address=beneficiary,
                        progress=bar)
                except TransactionFailed as e:
                    if crash_on_failure:
                        raise
                    # Best-effort mode: record the failure and move on.
                    self.log.debug(
                        f"Failed allocation transaction for {NU.from_nunits(amount)} to {beneficiary}: {e}"
                    )
                    failed.append(allocation)
                    continue

                else:
                    allocation_receipts[beneficiary] = receipts
                    allocation_contract_address = deployer.contract_address
                    self.log.info(
                        f"Created {deployer.contract_name} contract at {allocation_contract_address} "
                        f"for beneficiary {beneficiary}.")
                    allocated.append((allocation, allocation_contract_address))

                    # Create individual allocation file
                    individual_allocation_filename = f'allocation-{beneficiary}.json'
                    individual_allocation_filepath = os.path.join(
                        parent_path, individual_allocation_filename)
                    individual_allocation_file_data = {
                        'beneficiary_address': beneficiary,
                        'contract_address': allocation_contract_address
                    }
                    with open(individual_allocation_filepath, 'w') as outfile:
                        json.dump(individual_allocation_file_data, outfile)

                    if emitter:
                        blockchain = BlockchainInterfaceFactory.get_interface()
                        paint_contract_deployment(
                            contract_name=deployer.contract_name,
                            receipts=receipts,
                            contract_address=deployer.contract_address,
                            emitter=emitter,
                            chain_name=blockchain.client.chain_name,
                            open_in_browser=False)
                        emitter.echo(
                            f"Saved individual allocation file to {individual_allocation_filepath}",
                            color='blue',
                            bold=True)

            if emitter:
                paint_deployed_allocations(emitter, allocated, failed)

            csv_filename = f'allocations-{self.deployer_address[:6]}-{maya.now().epoch}.csv'
            csv_filepath = os.path.join(parent_path, csv_filename)
            write_deployed_allocations_to_csv(csv_filepath, allocated, failed)
            if emitter:
                emitter.echo(f"Saved allocation summary CSV to {csv_filepath}",
                             color='blue',
                             bold=True)

            if failed:
                # TODO: More with these failures: send to isolated logfile, and reattempt
                self.log.critical(
                    f"FAILED TOKEN ALLOCATION - {len(failed)} allocations failed."
                )

        return allocation_receipts

    @staticmethod
    def __read_allocation_data(filepath: str) -> list:
        """Read and parse a JSON allocation file.

        JSONDecodeError propagates to the caller; the previous
        ``except JSONDecodeError: raise`` was a no-op and has been removed.
        """
        with open(filepath, 'r') as allocation_file:
            allocation_data = json.loads(allocation_file.read())
        return allocation_data

    def deploy_beneficiaries_from_file(self,
                                       allocation_data_filepath: str,
                                       allocation_outfile: str = None,
                                       emitter=None,
                                       interactive=None) -> dict:
        """Deploy beneficiary contracts from a JSON allocation file and save receipts."""
        allocations = self.__read_allocation_data(
            filepath=allocation_data_filepath)
        receipts = self.deploy_beneficiary_contracts(
            allocations=allocations,
            allocation_outfile=allocation_outfile,
            emitter=emitter,
            interactive=interactive)
        # Save transaction metadata
        receipts_filepath = self.save_deployment_receipts(
            receipts=receipts, filename_prefix='allocation')
        if emitter:
            emitter.echo(f"Saved allocation receipts to {receipts_filepath}",
                         color='blue',
                         bold=True)
        return receipts

    def save_deployment_receipts(self,
                                 receipts: dict,
                                 filename_prefix: str = 'deployment') -> str:
        """Serialize deployment receipts to a timestamped JSON file.

        :param receipts: Mapping of contract name -> {tx name -> receipt dict}.
        :param filename_prefix: Prefix for the output filename.
        :return: Path of the written JSON file.
        """
        filename = f'{filename_prefix}-receipts-{self.deployer_address[:6]}-{maya.now().epoch}.json'
        filepath = os.path.join(DEFAULT_CONFIG_ROOT, filename)
        # TODO: Do not assume default config root
        os.makedirs(DEFAULT_CONFIG_ROOT, exist_ok=True)
        with open(filepath, 'w') as file:
            data = dict()
            # Fix: the original shadowed ``receipts`` in this loop and its
            # inner comprehension mapped every tx name to the last formatted
            # receipt; each tx now keeps its own receipt.
            for contract_name, contract_receipts in receipts.items():
                contract_records = dict()
                for tx_name, receipt in contract_receipts.items():
                    # Stringify values so the receipt is JSON-serializable.
                    contract_records[tx_name] = {
                        item: str(result)
                        for item, result in receipt.items()
                    }
                data[contract_name] = contract_records
            file.write(json.dumps(data, indent=4))
        return filepath
Beispiel #26
0
class SmsFactory(ClientFactory, Client):
    """Twisted factory that drives a serial GSM modem with AT commands to send SMS."""

    room = 'NA'
    # Fix: was the single string 'sendsms, readsms'; intended as a tuple of
    # supported action names.
    actions = ('sendsms', 'readsms')

    def __init__(self, event_fct=None):
        self.protocol = serialLineProtocol()
        self.uid = uuid.uuid4()
        self.protocol.factory = self
        self.log = Logger()
        self.first = True        # modem still needs an initial ATZ reset
        self.event = event_fct   # handler for unsolicited modem lines
        self.callback = None     # Deferred pending for the in-flight command
        self.wait = False        # True while a command response is awaited
        # Accumulated response lines for the current command.
        # Fix: was initialized to '' although .append() is called on it.
        self.response = []
        # Matches final modem responses: OK, ERROR, +CME/+CMS ERROR <n>,
        # or COMMAND NOT SUPPORT.
        self.resp_re = re.compile(
            r'^OK|ERROR|(\+CM[ES] ERROR: \d+)|(COMMAND NOT SUPPORT)$')

    def receive(self, line):
        """Route one line from the modem to the pending command or the event handler."""
        if self.wait:
            if self.resp_re.match(line):
                # Final response line: fire the pending Deferred.
                self.wait = False
                self.response.append(line)
                if line.startswith('ERROR'):
                    self.log.critical('error from Modem: %s' % line)
                    if self.callback:
                        self.callback.errback(self.response)
                else:
                    if self.callback:
                        self.callback.callback(self.response)
                self.response = []
                if self.callback:
                    self.callback = None
            else:
                # Intermediate line: keep buffering until a final response.
                self.response.append(line)
        elif self.event:
            self.event(line)
        else:
            self.log.debug('unmanaged message from Modem: %s' % line)

    def sendsms(self, recipient, message, callback_fct=None):
        """Send an SMS via chained steps: ATZ -> AT+CMGF=1 -> AT+CMGS -> message body.

        :param recipient: Phone number as a str (encoded into the AT command).
        :param message: Message body as bytes.
        :param callback_fct: Optional callback fired with the final modem response.
        """
        def recipient_set(res):
            self.log.debug(
                'do we have > ? ==> %s' % ('OK' if res == '>' else 'No: ' + res))
            # Fix: Deferred must be instantiated (was the class object).
            self.callback = defer.Deferred()
            if callback_fct:
                self.callback.addCallback(callback_fct)
            self.wait = True
            # Ctrl-Z (0x1a) terminates the SMS body in text mode.
            self.protocol.send(message + b'\x1a')

        def text_mode(res):
            self.callback = defer.Deferred()  # fix: instantiate
            self.callback.addCallback(recipient_set)
            self.wait = True
            self.protocol.send(b'AT+CMGS="' + recipient.encode() + b'"\r')

        def modem_init(res):
            self.first = False
            self.callback = defer.Deferred()  # fix: instantiate
            self.callback.addCallback(text_mode)
            self.wait = True
            self.protocol.send(b'AT+CMGF=1\r')

        if self.first:
            # First use: reset the modem before configuring text mode.
            self.wait = True
            self.callback = defer.Deferred()
            self.callback.addCallback(modem_init)
            self.protocol.send(b'ATZ\r')
        else:
            modem_init('OK')

    def _write(self, txt):
        """Encode and forward raw text to the modem."""
        self.protocol.send(txt.encode())
Beispiel #27
0
class StakeTracker:
    """Reads and caches on-chain stakes for a set of staker addresses,
    refreshing them periodically via a Twisted LoopingCall."""

    REFRESH_RATE = 60  # seconds between period checks

    # Class-level defaults kept for backward compatibility; __init__ creates
    # fresh per-instance copies so instances no longer share mutable state.
    tracking_addresses = set()

    __stakes = dict()  # type: Dict[str: List[Stake]]
    __actions = list()  # type: List[Tuple[Callable, tuple]]

    def __init__(self,
                 checksum_addresses: List[str],
                 refresh_rate: int = None,
                 start_now: bool = False,
                 *args,
                 **kwargs):
        """
        :param checksum_addresses: EIP-55 addresses to track.
        :param refresh_rate: Seconds between refreshes; defaults to REFRESH_RATE.
        :param start_now: If True, daemonize immediately; otherwise read stakes once.
        :raises ValueError: If any address is not a valid checksum address.
        """
        super().__init__(*args, **kwargs)

        self.log = Logger('stake-tracker')
        self.staking_agent = StakingEscrowAgent()

        self._refresh_rate = refresh_rate or self.REFRESH_RATE
        self._tracking_task = task.LoopingCall(self.__update)

        self.__current_period = None
        # Fix: shadow the class-level mutable attributes with instance-level
        # ones so separate trackers do not share addresses/actions/stakes.
        self.tracking_addresses = set()
        self.__actions = list()
        self.__stakes = dict()
        self.__start_time = NOT_STAKING
        self.__uptime_period = NOT_STAKING
        self.__terminal_period = NOT_STAKING
        self._abort_on_stake_tracking_error = True

        # "load-in":  Read on-chain stakes
        for checksum_address in checksum_addresses:
            if not is_checksum_address(checksum_address):
                raise ValueError(
                    f'{checksum_address} is not a valid EIP-55 checksum address'
                )
            self.tracking_addresses.add(checksum_address)

        if start_now:
            self.start()  # deamonize
        else:
            self.refresh(checksum_addresses=checksum_addresses)  # read-once

    @validate_checksum_address
    def __getitem__(self, checksum_address: str):
        """Index access returns the cached stakes for an address."""
        stakes = self.stakes(checksum_address=checksum_address)
        return stakes

    def add_action(self, func: Callable, args=()) -> None:
        """Register a callable to run whenever a new period is detected."""
        self.__actions.append((func, args))

    def clear_actions(self) -> None:
        """Remove all registered period-change actions."""
        self.__actions.clear()

    @property
    def current_period(self):
        return self.__current_period

    @validate_checksum_address
    def stakes(self, checksum_address: str) -> List[Stake]:
        """Return all cached stake instances from the blockchain."""
        try:
            return self.__stakes[checksum_address]
        except KeyError:
            return NO_STAKES.bool_value(False)
        except TypeError:
            if self.__stakes in (UNKNOWN_STAKES, NO_STAKES):
                return NO_STAKES.bool_value(False)
            raise

    @validate_checksum_address
    def refresh(self, checksum_addresses: List[str] = None) -> None:
        """Public staking cache invalidation method"""
        return self.__read_stakes(checksum_addresses=checksum_addresses)

    def stop(self) -> None:
        """Stop the periodic tracking task."""
        self._tracking_task.stop()
        self.log.info(f"STOPPED STAKE TRACKING")

    def start(self, force: bool = False) -> None:
        """
        High-level stake tracking initialization, this function aims
        to be safely called at any time - For example, it is okay to call
        this function multiple times within the same period.
        """
        if self._tracking_task.running and not force:
            return

        # Record the start time and period
        self.__start_time = maya.now()
        self.__uptime_period = self.staking_agent.get_current_period()
        self.__current_period = self.__uptime_period

        d = self._tracking_task.start(interval=self._refresh_rate)
        d.addErrback(self.handle_tracking_errors)
        self.log.info(
            f"STARTED STAKE TRACKING for {len(self.tracking_addresses)} addresses"
        )

    def _crash_gracefully(self, failure=None) -> None:
        """
        A facility for crashing more gracefully in the event that
        an exception is unhandled in a different thread.
        """
        self._crashed = failure
        failure.raiseException()

    def handle_tracking_errors(self, *args, **kwargs) -> None:
        """Errback for the tracking task: crash or log depending on abort policy."""
        failure = args[0]
        if self._abort_on_stake_tracking_error:
            self.log.critical(
                f"Unhandled error during node stake tracking. {failure}")
            reactor.callFromThread(self._crash_gracefully, failure=failure)
        else:
            self.log.warn(
                f"Unhandled error during stake tracking: {failure.getTraceback()}"
            )

    def __update(self) -> None:
        """Periodic task body: refresh caches and fire actions on a new period."""
        self.log.info(
            f"Checking for new period. Current period is {self.__current_period}"
        )
        onchain_period = self.staking_agent.get_current_period(
        )  # < -- Read from contract
        if self.__current_period != onchain_period:
            self.__current_period = onchain_period
            self.__read_stakes()
            for action, args in self.__actions:
                action(*args)

    @validate_checksum_address
    def __read_stakes(self, checksum_addresses: List[str] = None) -> None:
        """Rewrite the local staking cache by reading on-chain stakes"""

        if not checksum_addresses:
            checksum_addresses = self.tracking_addresses

        # Iterate over a snapshot: pruning may mutate self.tracking_addresses,
        # which can be the very collection being iterated.
        for checksum_address in list(checksum_addresses):

            if not is_checksum_address(checksum_address):
                if self._abort_on_stake_tracking_error:
                    raise ValueError(
                        f'{checksum_address} is not a valid EIP-55 checksum address'
                    )
                self.tracking_addresses.discard(checksum_address)  # Prune
                # Fix: skip the invalid address instead of reading its stakes.
                continue

            existing_records = len(
                self.stakes(checksum_address=checksum_address))

            # Candidate replacement cache values
            onchain_stakes, terminal_period = list(), 0

            # Read from blockchain
            stakes_reader = self.staking_agent.get_all_stakes(
                staker_address=checksum_address)
            for onchain_index, stake_info in enumerate(stakes_reader):

                if not stake_info:
                    onchain_stake = EMPTY_STAKING_SLOT

                else:
                    onchain_stake = Stake.from_stake_info(
                        checksum_address=checksum_address,
                        stake_info=stake_info,
                        index=onchain_index)

                    # Track the latest terminal period
                    if onchain_stake.end_period > terminal_period:
                        terminal_period = onchain_stake.end_period

                # Store the replacement stake
                onchain_stakes.append(onchain_stake)

            # Commit the new stake and terminal values to the cache
            if not onchain_stakes:
                self.__stakes[checksum_address] = NO_STAKES.bool_value(False)
            else:
                self.__terminal_period = terminal_period
                self.__stakes[checksum_address] = onchain_stakes
                # Fix: new = current - previous (the original subtraction was
                # inverted, yielding negative counts).
                new_records = len(
                    self.__stakes[checksum_address]) - existing_records
                self.log.debug(
                    f"Updated local staking cache ({new_records} new stakes).")

            # Record most recent cache update
            self.__updated = maya.now()
Beispiel #28
0
class Rest(object):
    """Minimal REST client for the Nest developer API, built on twisted.

    Supports one-shot request/response calls as well as a persistent
    ``text/event-stream`` connection that is automatically re-established
    whenever ``event_handler`` is supplied.
    """

    def __init__(self,
                 host='https://developer-api.nest.com',
                 token=None,
                 event_handler=None,
                 net_type='lan'):
        self.log = Logger()
        self.host = host
        self.token = token  # appended to every request as ?auth=<token>
        self.event_handler = event_handler
        self.pool = HTTPConnectionPool(reactor, persistent=True)
        self.loc = None  # redirect target cached from a 307 response
        self.reconnect = False
        self.fail_count = 0
        if event_handler:
            # Open the streaming connection and keep it alive forever.
            self.reconnect = True
            d = self.request(
                headers={
                    'User-Agent': ['onDemand Rest Client'],
                    'Accept': ['text/event-stream']
                })
            d.addCallback(self.on_disconnect)

    def __getattr__(self, name):
        # Unknown attributes become lazily-bound REST call proxies.
        # object has no __getattr__, so the super() call always raises
        # AttributeError and we fall through to the proxy.
        try:
            super(Rest, self).__getattr__(name)
        except AttributeError:
            return RestCall(self, name)

    def on_disconnect(self, reason):
        """Log a dropped event stream and reconnect if configured to."""
        if not reason:
            reason = {'reason': 'no_message'}
        self.log.critical('disconnected: {reason}', reason=reason['reason'])
        if self.fail_count > 10:
            self.log.error('Max error count reached, aborting connection')

        def test_connectivity(count):
            # Reset the failure counter if no new failures occurred
            # during the 10 second grace window.
            if self.fail_count == count:
                self.fail_count = 0

        self.fail_count += 1
        c = self.fail_count
        reactor.callLater(10, test_connectivity, c)  # @UndefinedVariable
        if self.reconnect:
            d = self.request(
                headers={
                    'User-Agent': ['onDemand Rest Client'],
                    'Accept': ['text/event-stream']
                })
            d.addCallback(self.on_disconnect)

    def request(self, method='GET', path='', headers=None, body=None):
        """Issue an HTTP request and deliver the body to a RestHandle.

        :param method: HTTP verb.
        :param path: path joined onto the host (or the cached redirect host).
        :param headers: raw-headers dict; defaults to the JSON accept headers.
        :param body: optional JSON-serializable request body.
        :return: Deferred firing once the response body is consumed.
        """
        # FIX: the default used to be a shared mutable dict that this method
        # mutated below (Content-Type leaked into every subsequent call).
        if headers is None:
            headers = {
                'User-Agent': ['onDemand/1.0 (Rest_Client)'],
                'Accept': ['application/json']
            }

        data = None
        if self.loc:
            host = '/'.join((self.loc, path))
        else:
            host = '/'.join((self.host, path))
        if self.token:
            host += '?auth=' + self.token
        if body:
            headers.update({'Content-Type': ['application/json']})
            data = FileBodyProducer(StringIO(json.dumps(body)))
        agent = RedirectAgent(Agent(reactor, pool=self.pool))
        d = agent.request(method, host, Headers(headers), data)

        def cbFail(fail):

            if hasattr(fail.value, 'response'):
                if hasattr(fail.value.response, 'code'):
                    if fail.value.response.code == 307:
                        # Temporary redirect: cache the new location unless
                        # it points back at the configured host.
                        loc = fail.value.response.headers.getRawHeaders(
                            'location')
                        new = urlparse(loc[0])
                        newhost = '://'.join((new.scheme, new.netloc))
                        if newhost == self.host:
                            self.loc = None
                        else:
                            self.loc = newhost
                        self.log.debug('redirect: %s' % self.loc)
                        data = FileBodyProducer(StringIO(json.dumps(body)))
                        d = agent.request(method, loc[0], Headers(headers),
                                          data)
                        d.addCallbacks(cbRequest, cbFail)
                        return d
                    elif fail.value.response.code == 404 and self.loc:
                        # Cached redirect went stale: retry against the
                        # original host.  FIX: this used to pass a fully
                        # joined URL as `path` (double host join) and a
                        # Headers object where a plain dict is expected.
                        self.loc = None
                        d = self.request(method, path, headers, body)
                        d.addCallbacks(cbRequest, cbFail)
                        return d
                else:
                    print(dir(fail.value))
                    print(fail.value.message)
                    print(fail.value.args)

            self.log.error('unhandled failure: %s -- %s' %
                           (fail.value.message, fail.value))

        def cbRequest(response):
            # Stream the response body into a RestHandle; the returned
            # Deferred fires when the body is fully delivered.
            finished = Deferred()
            response.deliverBody(RestHandle(finished, self.event_handler))
            return finished

        d.addCallbacks(cbRequest, cbFail)
        return d
Beispiel #29
0
class Miner(NucypherTokenActor):
    """
    Ursula baseclass for blockchain operations, practically carrying a pickaxe.
    """

    # Interval of the looping call that checks for a new staking period.
    __current_period_sample_rate = 60 * 60  # seconds

    class MinerError(NucypherTokenActor.ActorError):
        pass

    def __init__(self,
                 is_me: bool,
                 start_staking_loop: bool = True,
                 economics: TokenEconomics = None,
                 *args,
                 **kwargs) -> None:
        """
        :param is_me: True when this Miner represents the local node
                      (enables the token agent and the staking loop).
        :param start_staking_loop: Begin the looping confirmation task
                                   immediately if active stakes are found.
        :param economics: Token economics parameters; defaults to the
                          standard TokenEconomics.
        """
        super().__init__(*args, **kwargs)

        self.log = Logger("miner")
        self.is_me = is_me

        if not economics:
            economics = TokenEconomics()
        self.economics = economics

        #
        # Blockchain
        #

        if is_me:
            self.token_agent = NucypherTokenAgent(blockchain=self.blockchain)

            # Staking Loop
            self.__current_period = None
            self._abort_on_staking_error = True
            self._staking_task = task.LoopingCall(self.heartbeat)

        else:
            self.token_agent = STRANGER_MINER

        self.miner_agent = MinerAgent(blockchain=self.blockchain)

        #
        # Stakes
        #

        self.__stakes = UNKNOWN_STAKES
        self.__start_time = NOT_STAKING
        self.__uptime_period = NOT_STAKING
        self.__terminal_period = UNKNOWN_STAKES

        self.__read_stakes()  # "load-in":  Read on-chain stakes

        # Start the callbacks if there are active stakes
        if (self.stakes is not NO_STAKES) and start_staking_loop:
            self.stake()

    #
    # Staking
    #

    @only_me
    def stake(self, confirm_now: bool = True) -> None:
        """
        High-level staking looping call initialization, this function aims
        to be safely called at any time - For example, it is okay to call
        this function multiple times within the same period.
        """
        # Get the last stake end period of all stakes
        terminal_period = max(stake.end_period for stake in self.stakes)

        if confirm_now:
            self.confirm_activity()

        # record start time and periods
        self.__start_time = maya.now()
        self.__uptime_period = self.miner_agent.get_current_period()
        self.__terminal_period = terminal_period
        self.__current_period = self.__uptime_period
        self.start_staking_loop()

    @property
    def last_active_period(self) -> int:
        """Most recent period for which activity was confirmed on-chain."""
        period = self.miner_agent.get_last_active_period(
            address=self.checksum_address)
        return period

    @only_me
    def _confirm_period(self):
        """Confirm activity whenever the on-chain period has advanced."""

        onchain_period = self.miner_agent.get_current_period(
        )  # < -- Read from contract
        self.log.info("Checking for new period. Current period is {}".format(
            self.__current_period))

        # Check if the period has changed on-chain
        if self.__current_period != onchain_period:

            # Let's see how much time has passed
            # TODO: Follow-up actions for downtime
            missed_periods = onchain_period - self.last_active_period
            if missed_periods:
                self.log.warn(
                    f"MISSED CONFIRMATION - {missed_periods} missed staking confirmations detected!"
                )
                self.__read_stakes()  # Invalidate the stake cache

            # Check for stake expiration and exit
            # NOTE(review): compares the locally cached period rather than
            # onchain_period - confirm this off-by-one is intentional.
            stake_expired = self.__current_period >= self.__terminal_period
            if stake_expired:
                self.log.info('STOPPED STAKING - Final stake ended.')
                return True

            # Write to Blockchain
            self.confirm_activity()

            # Update local period cache
            self.__current_period = onchain_period
            self.log.info("Confirmed activity for period {}".format(
                self.__current_period))

    def heartbeat(self):
        """Used with LoopingCall"""
        # Errors propagate to the LoopingCall errback
        # (handle_staking_errors); the previous try/except only re-raised.
        self._confirm_period()

    def _crash_gracefully(self, failure=None):
        """
        A facility for crashing more gracefully in the event that an exception is unhandled in a different thread.
        """
        self._crashed = failure
        failure.raiseException()

    def handle_staking_errors(self, *args, **kwargs):
        """Errback for the staking LoopingCall."""
        failure = args[0]
        if self._abort_on_staking_error:
            self.log.critical(
                "Unhandled error during node staking.  Attempting graceful crash."
            )
            reactor.callFromThread(self._crash_gracefully, failure=failure)
        else:
            self.log.warn("Unhandled error during node learning: {}".format(
                failure.getTraceback()))

    @only_me
    def start_staking_loop(self, now=True) -> None:
        """Start the periodic confirmation task (idempotent)."""
        if self._staking_task.running:
            return
        d = self._staking_task.start(
            interval=self.__current_period_sample_rate, now=now)
        d.addErrback(self.handle_staking_errors)
        self.log.info(
            f"STARTED STAKING - Scheduled end period is currently {self.__terminal_period}"
        )

    @property
    def is_staking(self) -> bool:
        """Checks if this Miner currently has active stakes / locked tokens."""
        return bool(self.stakes)

    def locked_tokens(self, periods: int = 0) -> NU:
        """Returns the amount of tokens this miner has locked for a given duration in periods."""
        raw_value = self.miner_agent.get_locked_tokens(
            miner_address=self.checksum_address, periods=periods)
        value = NU.from_nunits(raw_value)
        return value

    @property
    def current_stake(self) -> NU:
        """
        The total number of staked tokens, either locked or unlocked in the current period.
        """

        if self.stakes:
            return NU(sum(int(stake.value) for stake in self.stakes), 'NuNit')
        else:
            return NU.ZERO()

    @only_me
    def divide_stake(self,
                     stake_index: int,
                     target_value: NU,
                     additional_periods: int = None,
                     expiration: maya.MayaDT = None) -> tuple:
        """Split an existing stake in two, extending the new portion.

        :return: (modified_stake, new_stake)
        :raises Stake.StakingError: for an invalid index or an expiration
                                    that does not extend the stake.
        """

        # Calculate duration in periods
        if additional_periods and expiration:
            raise ValueError(
                "Pass the number of lock periods or an expiration MayaDT; not both."
            )

        # Select stake to divide from local cache
        try:
            current_stake = self.stakes[stake_index]
        except (IndexError, KeyError):
            # FIX: stakes is list-like, so a bad index raises IndexError;
            # the original handler caught only KeyError and never fired.
            if len(self.stakes):
                message = f"Cannot divide stake - No stake exists with index {stake_index}."
            else:
                message = "Cannot divide stake - There are no active stakes."
            raise Stake.StakingError(message)

        # Calculate stake duration in periods
        if expiration:
            additional_periods = datetime_to_period(
                datetime=expiration) - current_stake.end_period
            if additional_periods <= 0:
                raise Stake.StakingError(
                    f"New expiration {expiration} must be at least 1 period from the "
                    f"current stake's end period ({current_stake.end_period})."
                )

        # Do it already!
        modified_stake, new_stake = current_stake.divide(
            target_value=target_value, additional_periods=additional_periods)

        # Update staking cache
        self.__read_stakes()

        return modified_stake, new_stake

    @only_me
    def initialize_stake(self,
                         amount: NU,
                         lock_periods: int = None,
                         expiration: maya.MayaDT = None,
                         entire_balance: bool = False) -> Stake:
        """Create a new stake."""

        #
        # Duration
        #

        if lock_periods and expiration:
            raise ValueError(
                "Pass the number of lock periods or an expiration MayaDT; not both."
            )
        if expiration:
            lock_periods = calculate_period_duration(future_time=expiration)

        #
        # Value
        #

        if entire_balance and amount:
            raise ValueError("Specify an amount or entire balance, not both")
        if entire_balance:
            amount = self.token_balance
        if not self.token_balance >= amount:
            raise self.MinerError(
                f"Insufficient token balance ({self.token_agent}) for new stake initialization of {amount}"
            )

        # Ensure the new stake will not exceed the staking limit
        # FIX: message said "Cannot divide stake" (copy-paste from divide_stake).
        if (self.current_stake +
                amount) > self.economics.maximum_allowed_locked:
            raise Stake.StakingError(
                f"Cannot initialize stake - Maximum stake value exceeded with a target value of {amount}."
            )

        #
        # Stake
        #

        # Write to blockchain
        new_stake = Stake.initialize_stake(miner=self,
                                           amount=amount,
                                           lock_periods=lock_periods)
        self.__read_stakes()  # Update local staking cache
        return new_stake

    #
    # Staking Cache
    #

    def __read_stakes(self) -> None:
        """Rewrite the local staking cache by reading on-chain stakes"""

        # NOTE(review): assumes len() is defined for the UNKNOWN_STAKES
        # sentinel on the very first read - confirm.
        existing_records = len(self.__stakes)

        # Candidate replacement cache values
        onchain_stakes, terminal_period = list(), 0

        # Read from blockchain
        stakes_reader = self.miner_agent.get_all_stakes(
            miner_address=self.checksum_address)

        for onchain_index, stake_info in enumerate(stakes_reader):

            if not stake_info:
                # This stake index is empty on-chain
                onchain_stake = EMPTY_STAKING_SLOT

            else:
                # On-chain stake detected
                onchain_stake = Stake.from_stake_info(miner=self,
                                                      stake_info=stake_info,
                                                      index=onchain_index)

                # Search for the terminal period
                if onchain_stake.end_period > terminal_period:
                    terminal_period = onchain_stake.end_period

            # Store the replacement stake
            onchain_stakes.append(onchain_stake)

        # Commit the new stake and terminal values to the cache
        if not onchain_stakes:
            self.__stakes = NO_STAKES.bool_value(False)
        else:
            self.__terminal_period = terminal_period
            self.__stakes = onchain_stakes

        # Record most recent cache update
        self.__updated = maya.now()
        # FIX: the delta was computed as old - new, negating the count.
        new_records = len(self.__stakes) - existing_records
        self.log.debug(
            f"Updated local staking cache ({new_records} new records).")

    def refresh_staking_cache(self) -> None:
        """Public staking cache invalidation method"""
        return self.__read_stakes()

    @property
    def stakes(self) -> List[Stake]:
        """Return all cached stake instances from the blockchain."""
        return self.__stakes

    #
    # Reward and Collection
    #

    @only_me
    def confirm_activity(self) -> str:
        """Miner rewarded for every confirmed period"""
        txhash = self.miner_agent.confirm_activity(
            node_address=self.checksum_address)
        self._transaction_cache.append((datetime.utcnow(), txhash))
        return txhash

    @only_me
    def mint(self) -> str:
        """Computes and transfers tokens to the miner's account.

        FIX: was annotated Tuple[str, str] but returns a single txhash.
        """
        mint_txhash = self.miner_agent.mint(node_address=self.checksum_address)
        self._transaction_cache.append((datetime.utcnow(), mint_txhash))
        return mint_txhash

    def calculate_reward(self) -> int:
        """Return the currently calculated staking reward in NuNits."""
        staking_reward = self.miner_agent.calculate_staking_reward(
            checksum_address=self.checksum_address)
        return staking_reward

    @only_me
    def collect_policy_reward(self,
                              collector_address=None,
                              policy_agent: PolicyAgent = None):
        """Collect rewarded ETH"""
        policy_agent = policy_agent if policy_agent is not None else PolicyAgent(
            blockchain=self.blockchain)

        withdraw_address = collector_address or self.checksum_address
        policy_reward_txhash = policy_agent.collect_policy_reward(
            collector_address=withdraw_address,
            miner_address=self.checksum_address)
        self._transaction_cache.append(
            (datetime.utcnow(), policy_reward_txhash))
        return policy_reward_txhash

    @only_me
    def collect_staking_reward(self) -> str:
        """Withdraw tokens rewarded for staking."""
        collection_txhash = self.miner_agent.collect_staking_reward(
            checksum_address=self.checksum_address)
        self._transaction_cache.append((datetime.utcnow(), collection_txhash))
        return collection_txhash
Beispiel #30
0
class NodeConfiguration(ABC):
    """
    'Sideways Engagement' of Character classes; a reflection of input parameters.
    """

    # Abstract
    _NAME = NotImplemented
    _CHARACTER_CLASS = NotImplemented
    CONFIG_FILENAME = NotImplemented
    DEFAULT_CONFIG_FILE_LOCATION = NotImplemented

    # Mode
    DEFAULT_OPERATING_MODE = 'decentralized'

    # Domains
    DEFAULT_DOMAIN = GLOBAL_DOMAIN

    # Serializers
    NODE_SERIALIZER = binascii.hexlify
    NODE_DESERIALIZER = binascii.unhexlify

    # System
    __CONFIG_FILE_EXT = '.config'
    __CONFIG_FILE_DESERIALIZER = json.loads
    TEMP_CONFIGURATION_DIR_PREFIX = "nucypher-tmp-"

    # Blockchain
    DEFAULT_PROVIDER_URI = 'tester://pyevm'

    # Registry
    __REGISTRY_NAME = 'contract_registry.json'
    REGISTRY_SOURCE = os.path.join(
        BASE_DIR, __REGISTRY_NAME)  # TODO: #461 Where will this be hosted?

    # Rest + TLS
    DEFAULT_REST_HOST = '127.0.0.1'
    DEFAULT_REST_PORT = 9151
    DEFAULT_DEVELOPMENT_REST_PORT = 10151
    __DEFAULT_TLS_CURVE = ec.SECP384R1
    __DEFAULT_NETWORK_MIDDLEWARE_CLASS = RestMiddleware

    class ConfigurationError(RuntimeError):
        pass

    class InvalidConfiguration(ConfigurationError):
        pass

    def __init__(
            self,

            # Base
            config_root: str = None,
            config_file_location: str = None,

            # Mode
            dev_mode: bool = False,
            federated_only: bool = False,

            # Identity
            is_me: bool = True,
            checksum_public_address: str = None,
            crypto_power: CryptoPower = None,

            # Keyring
            keyring: NucypherKeyring = None,
            keyring_dir: str = None,

            # Learner
            learn_on_same_thread: bool = False,
            abort_on_learning_error: bool = False,
            start_learning_now: bool = True,

            # REST
            rest_host: str = None,
            rest_port: int = None,

            # TLS
            tls_curve: EllipticCurve = None,
            certificate: Certificate = None,

            # Network
            domains: Set[str] = None,
            interface_signature: Signature = None,
            network_middleware: RestMiddleware = None,

            # Node Storage
            known_nodes: set = None,
            node_storage: NodeStorage = None,
            reload_metadata: bool = True,
            save_metadata: bool = True,

            # Blockchain
            poa: bool = False,
            provider_uri: str = None,

            # Registry
            registry_source: str = None,
            registry_filepath: str = None,
            import_seed_registry: bool = False  # TODO: needs cleanup
    ) -> None:
        """Assemble a node configuration from the given parameters.

        Unset values fall back to class-level defaults.  Development mode
        uses ephemeral (forgetful) storage and auto-initializes with a
        one-time random password; stranger configurations (is_me=False)
        get no keyring, storage, or middleware.
        """

        # Logs
        self.log = Logger(self.__class__.__name__)

        #
        # REST + TLS (Ursula)
        #
        self.rest_host = rest_host or self.DEFAULT_REST_HOST
        default_port = (self.DEFAULT_DEVELOPMENT_REST_PORT
                        if dev_mode else self.DEFAULT_REST_PORT)
        self.rest_port = rest_port or default_port
        self.tls_curve = tls_curve or self.__DEFAULT_TLS_CURVE
        self.certificate = certificate

        self.interface_signature = interface_signature
        self.crypto_power = crypto_power

        #
        # Keyring
        #
        self.keyring = keyring or NO_KEYRING_ATTACHED
        self.keyring_dir = keyring_dir or UNINITIALIZED_CONFIGURATION

        # Contract Registry
        if import_seed_registry is True:
            registry_source = self.REGISTRY_SOURCE
            if not os.path.isfile(registry_source):
                # FIX: the message previously formatted registry_filepath,
                # which is unrelated to the path actually being checked.
                message = "Seed contract registry does not exist at path {}.".format(
                    registry_source)
                self.log.debug(message)
                raise RuntimeError(message)
        self.__registry_source = registry_source or self.REGISTRY_SOURCE
        self.registry_filepath = registry_filepath or UNINITIALIZED_CONFIGURATION

        #
        # Configuration
        #
        self.config_file_location = config_file_location or UNINITIALIZED_CONFIGURATION
        self.config_root = UNINITIALIZED_CONFIGURATION

        #
        # Mode
        #
        self.federated_only = federated_only
        self.__dev_mode = dev_mode

        if self.__dev_mode:
            self.__temp_dir = UNINITIALIZED_CONFIGURATION
            self.node_storage = ForgetfulNodeStorage(
                federated_only=federated_only, character_class=self.__class__)
        else:
            self.__temp_dir = LIVE_CONFIGURATION
            self.config_root = config_root or DEFAULT_CONFIG_ROOT
            self._cache_runtime_filepaths()
            self.node_storage = node_storage or LocalFileBasedNodeStorage(
                federated_only=federated_only, config_root=self.config_root)

        # Domains
        self.domains = domains or {self.DEFAULT_DOMAIN}

        #
        # Identity
        #
        self.is_me = is_me
        self.checksum_public_address = checksum_public_address

        if self.is_me is True or dev_mode is True:
            # Self
            if self.checksum_public_address and dev_mode is False:
                self.attach_keyring()
            self.network_middleware = network_middleware or self.__DEFAULT_NETWORK_MIDDLEWARE_CLASS(
            )

        else:
            # Stranger
            self.node_storage = STRANGER_CONFIGURATION
            self.keyring_dir = STRANGER_CONFIGURATION
            self.keyring = STRANGER_CONFIGURATION
            self.network_middleware = STRANGER_CONFIGURATION
            if network_middleware:
                raise self.ConfigurationError(
                    "Cannot configure a stranger to use network middleware.")

        #
        # Learner
        #
        self.learn_on_same_thread = learn_on_same_thread
        self.abort_on_learning_error = abort_on_learning_error
        self.start_learning_now = start_learning_now
        self.save_metadata = save_metadata
        self.reload_metadata = reload_metadata

        self.__fleet_state = FleetStateTracker()
        known_nodes = known_nodes or set()
        if known_nodes:
            self.known_nodes._nodes.update(
                {node.checksum_public_address: node
                 for node in known_nodes})
            self.known_nodes.record_fleet_state(
            )  # TODO: Does this call need to be here?

        #
        # Blockchain
        #
        self.poa = poa
        self.provider_uri = provider_uri or self.DEFAULT_PROVIDER_URI

        self.blockchain = NO_BLOCKCHAIN_CONNECTION
        self.accounts = NO_BLOCKCHAIN_CONNECTION
        self.token_agent = NO_BLOCKCHAIN_CONNECTION
        self.miner_agent = NO_BLOCKCHAIN_CONNECTION
        self.policy_agent = NO_BLOCKCHAIN_CONNECTION

        #
        # Development Mode
        #
        if dev_mode:

            # Ephemeral dev settings
            self.abort_on_learning_error = True
            self.save_metadata = False
            self.reload_metadata = False

            # Generate one-time alphanumeric development password
            alphabet = string.ascii_letters + string.digits
            password = ''.join(secrets.choice(alphabet) for _ in range(32))

            # Auto-initialize
            self.initialize(password=password,
                            import_registry=import_seed_registry)

    def __call__(self, *args, **kwargs):
        """Calling a configuration object is shorthand for producing a character."""
        character = self.produce(*args, **kwargs)
        return character

    @classmethod
    def generate(cls, password: str, no_registry: bool, *args,
                 **kwargs) -> 'UrsulaConfiguration':
        """Shortcut: Hook-up a new initial installation and write configuration file to the disk"""
        new_configuration = cls(dev_mode=False, is_me=True, *args, **kwargs)
        new_configuration.__write(password=password, no_registry=no_registry)
        return new_configuration

    def __write(self, password: str, no_registry: bool):
        """Initialize the installation on disk and persist the configuration file."""
        self.initialize(password=password, import_registry=no_registry)
        self.to_configuration_file(filepath=self.config_file_location)

    def cleanup(self) -> None:
        """Remove the ephemeral temporary directory created in development mode."""
        if not self.__dev_mode:
            return
        self.__temp_dir.cleanup()

    @property
    def dev_mode(self):
        """Read-only view of the private development-mode flag."""
        return self.__dev_mode

    @property
    def known_nodes(self):
        """The fleet state tracker holding all known node metadata."""
        return self.__fleet_state

    def connect_to_blockchain(self, recompile_contracts: bool = False):
        """Connect to the configured blockchain provider and cache accounts.

        :param recompile_contracts: recompile contract source on connect.
        :raises NodeConfiguration.ConfigurationError: in federated mode.
        """
        if self.federated_only:
            raise NodeConfiguration.ConfigurationError(
                "Cannot connect to blockchain in federated mode")

        self.blockchain = Blockchain.connect(provider_uri=self.provider_uri,
                                             compile=recompile_contracts,
                                             poa=self.poa)

        # Cache the provider's unlocked accounts for later selection.
        self.accounts = self.blockchain.interface.w3.eth.accounts
        self.log.debug("Established connection to provider {}".format(
            self.blockchain.interface.provider_uri))

    def connect_to_contracts(self) -> None:
        """Initialize contract agency and set them on config"""
        blockchain = self.blockchain
        self.token_agent = NucypherTokenAgent(blockchain=blockchain)
        self.miner_agent = MinerAgent(blockchain=blockchain)
        self.policy_agent = PolicyAgent(blockchain=blockchain)
        self.log.debug("Established connection to nucypher contracts")

    def read_known_nodes(self):
        """Load node metadata from storage into the fleet state and return it."""
        stored_nodes = self.node_storage.all(federated_only=self.federated_only)
        self.known_nodes._nodes.update(
            {node.checksum_public_address: node
             for node in stored_nodes})
        self.known_nodes.record_fleet_state()
        return self.known_nodes

    def forget_nodes(self) -> None:
        """Clear all stored node metadata and certificates from node storage."""
        self.node_storage.clear()
        # FIX: log message contained a duplicated word ("node node").
        message = "Removed all stored node metadata and certificates"
        self.log.debug(message)

    def destroy(self, force: bool = False, logs: bool = True) -> None:
        """Delete this configuration's directories (and optionally logs) from disk."""

        # TODO: Further confirm this is a nucypher dir first! (in-depth measure)

        if logs is True or force:
            shutil.rmtree(USER_LOG_DIR, ignore_errors=True)
        try:
            shutil.rmtree(self.config_root, ignore_errors=force)
        except FileNotFoundError:
            # Re-raise with the offending path included.
            raise FileNotFoundError(
                "No such directory {}".format(self.config_root))

    def generate_parameters(self, **overrides) -> dict:
        """Merge static and dynamic payloads with overrides, dropping non-init keys."""
        excluded = ('config_root', 'poa', 'provider_uri')
        merged = {**self.static_payload, **self.dynamic_payload, **overrides}
        return {key: value
                for key, value in merged.items() if key not in excluded}

    def produce(self, **overrides):
        """Initialize a new character instance and return it."""
        return self._CHARACTER_CLASS(**self.generate_parameters(**overrides))

    @staticmethod
    def _read_configuration_file(filepath: str) -> dict:
        """Read and deserialize a configuration file.

        :param filepath: path of the serialized configuration file.
        :raises FileNotFoundError: if no file exists at *filepath*.
        """
        # FIX: removed a dead `except FileNotFoundError as e: raise` that
        # only re-raised and left `e` unused - the error propagates as-is.
        with open(filepath, 'r') as file:
            raw_contents = file.read()
        payload = NodeConfiguration.__CONFIG_FILE_DESERIALIZER(raw_contents)
        return payload

    @classmethod
    def from_configuration_file(cls,
                                filepath: str = None,
                                **overrides) -> 'NodeConfiguration':
        """Initialize a NodeConfiguration from a JSON file."""

        # Imported here to avoid a circular import at module load time.
        from nucypher.config.storages import NodeStorage
        node_storage_subclasses = {
            storage._name: storage
            for storage in NodeStorage.__subclasses__()
        }

        if filepath is None:
            filepath = cls.DEFAULT_CONFIG_FILE_LOCATION

        # Read from disk
        payload = cls._read_configuration_file(filepath=filepath)

        # Initialize NodeStorage subclass from file (sub-configuration)
        storage_payload = payload['node_storage']
        storage_type = storage_payload[NodeStorage._TYPE_LABEL]
        storage_class = node_storage_subclasses[storage_type]
        node_storage = storage_class.from_payload(
            payload=storage_payload,
            # character_class=cls._CHARACTER_CLASS,  # TODO: Do not pass this here - Always Use Ursula
            federated_only=payload['federated_only'],
            serializer=cls.NODE_SERIALIZER,
            deserializer=cls.NODE_DESERIALIZER)

        # Deserialize domains to UTF-8 bytestrings
        domains = list(domain.encode() for domain in payload['domains'])
        payload.update(dict(node_storage=node_storage, domains=domains))

        # Filter out Nones from overrides to detect, well, overrides
        overrides = {k: v for k, v in overrides.items() if v is not None}

        # Instantiate from merged params; overrides win over file values.
        node_configuration = cls(**{**payload, **overrides})

        return node_configuration

    def to_configuration_file(self, filepath: str = None) -> str:
        """Write the static_payload to a JSON file."""
        if filepath is None:
            filepath = os.path.join(
                self.config_root,
                '{}{}'.format(self._NAME.lower(), self.__CONFIG_FILE_EXT))

        payload = self.static_payload
        del payload['is_me']  # TODO

        # Save node connection data; domains serialized as strings.
        payload.update(
            dict(node_storage=self.node_storage.payload(),
                 domains=[str(d) for d in self.domains]))

        with open(filepath, 'w') as config_file:
            config_file.write(json.dumps(payload, indent=4))
        return filepath

    def validate(self, config_root: str, no_registry=False) -> bool:
        """Verify the configuration directory tree exists on disk."""
        # Top-level
        if not os.path.exists(config_root):
            raise self.ConfigurationError(
                'No configuration directory found at {}.'.format(config_root))

        # Sub-paths
        filepaths = self.runtime_filepaths
        if no_registry:
            del filepaths['registry_filepath']

        for field, path in filepaths.items():
            if os.path.exists(path):
                continue
            message = 'Missing configuration file or directory: {}.'
            if 'registry' in path:
                message += ' Did you mean to pass --federated-only?'
            raise NodeConfiguration.InvalidConfiguration(message.format(path))
        return True

    @property
    def static_payload(self) -> dict:
        """Exported static configuration values for initializing Ursula."""
        # Identity
        payload = {
            'config_root': self.config_root,
            'is_me': self.is_me,
            'federated_only': self.federated_only,
            'checksum_public_address': self.checksum_public_address,
            'keyring_dir': self.keyring_dir,
        }

        # Behavior
        payload.update({
            'domains': self.domains,  # From Set
            'learn_on_same_thread': self.learn_on_same_thread,
            'abort_on_learning_error': self.abort_on_learning_error,
            'start_learning_now': self.start_learning_now,
            'save_metadata': self.save_metadata,
        })

        # Decentralized mode carries blockchain connection details as well
        if not self.federated_only:
            payload['provider_uri'] = self.provider_uri
            payload['poa'] = self.poa

        return payload

    @property
    def dynamic_payload(self) -> dict:
        """Exported dynamic configuration values for initializing Ursula.

        NOTE: this getter previously declared ``**overrides``, but a property
        getter is invoked via attribute access and can never receive keyword
        arguments, so ``overrides`` was always empty and the override branch
        was dead code.  The unusable parameter and branch have been removed;
        behavior for callers (``config.dynamic_payload``) is unchanged.
        """
        if self.reload_metadata:
            # Re-read previously stored nodes and merge them into the known set
            known_nodes = self.node_storage.all(
                federated_only=self.federated_only)
            known_nodes = {
                node.checksum_public_address: node
                for node in known_nodes
            }
            self.known_nodes._nodes.update(known_nodes)
        self.known_nodes.record_fleet_state()

        payload = dict(network_middleware=self.network_middleware
                       or self.__DEFAULT_NETWORK_MIDDLEWARE_CLASS(),
                       known_nodes=self.known_nodes,
                       node_storage=self.node_storage,
                       # An empty power-up list collapses to None
                       crypto_power_ups=self.derive_node_power_ups() or None)

        # Decentralized mode additionally requires a blockchain connection
        if not self.federated_only:
            self.connect_to_blockchain(recompile_contracts=False)
            payload.update(blockchain=self.blockchain)

        return payload

    @property
    def runtime_filepaths(self):
        """Filesystem paths this configuration relies on at runtime."""
        return {
            'config_root': self.config_root,
            'keyring_dir': self.keyring_dir,
            'registry_filepath': self.registry_filepath,
        }

    @classmethod
    def generate_runtime_filepaths(cls, config_root: str) -> dict:
        """Dynamically generate paths based on configuration root directory."""
        return {
            'config_root': config_root,
            'config_file_location': os.path.join(config_root,
                                                 cls.CONFIG_FILENAME),
            'keyring_dir': os.path.join(config_root, 'keyring'),
            'registry_filepath': os.path.join(
                config_root, NodeConfiguration.__REGISTRY_NAME),
        }

    def _cache_runtime_filepaths(self) -> None:
        """Generate runtime filepaths and cache them on the config object."""
        generated = self.generate_runtime_filepaths(
            config_root=self.config_root)
        for attribute, generated_path in generated.items():
            # Only fill in values that were never explicitly configured
            if getattr(self, attribute) is UNINITIALIZED_CONFIGURATION:
                setattr(self, attribute, generated_path)

    def derive_node_power_ups(self) -> List[CryptoPowerUp]:
        """Derive a crypto power-up from the keyring for each default power class."""
        # Dev-mode and foreign ("not me") configurations carry no key material
        if not self.is_me or self.dev_mode:
            return []
        return [
            self.keyring.derive_crypto_power(power_class)
            for power_class in self._CHARACTER_CLASS._default_crypto_powerups
        ]

    def initialize(self,
                   password: str,
                   import_registry: bool = True) -> str:
        """Create the on-disk configuration: config root, node storage,
        keyring, and (optionally) the contract registry.

        :param password: Password used to generate the keyring.
        :param import_registry: Also write the contract registry file.
        :return: The configuration root directory that was created.
        """
        #
        # Create Config Root
        #
        # Dev mode lives in a throwaway temp dir; otherwise use a real directory
        if self.__dev_mode:
            self.__temp_dir = TemporaryDirectory(
                prefix=self.TEMP_CONFIGURATION_DIR_PREFIX)
            self.config_root = self.__temp_dir.name
        else:
            try:
                os.mkdir(self.config_root, mode=0o755)
            except FileExistsError:
                # An empty pre-existing directory is acceptable
                if os.listdir(self.config_root):
                    raise self.ConfigurationError(
                        "There are existing files located at {}".format(
                            self.config_root))
            except FileNotFoundError:
                # Parent directories are missing too; create the full path
                os.makedirs(self.config_root, mode=0o755)

        #
        # Create Config Subdirectories
        #
        self._cache_runtime_filepaths()
        try:
            # Node Storage
            self.node_storage.initialize()

            # Keyring
            if not self.dev_mode:
                # keyring TODO: Keyring backend entry point
                os.mkdir(self.keyring_dir, mode=0o700)
                self.write_keyring(password=password)

            # Registry
            if import_registry and not self.federated_only:
                self.write_registry(output_filepath=self.registry_filepath,
                                    source=self.__registry_source,
                                    blank=import_registry)
        except FileExistsError:
            existing_paths = [os.path.join(self.config_root, entry)
                              for entry in os.listdir(self.config_root)]
            message = "There are pre-existing files at {}: {}".format(
                self.config_root, existing_paths)
            self.log.critical(message)
            raise NodeConfiguration.ConfigurationError(message)

        if not self.__dev_mode:
            self.validate(config_root=self.config_root,
                          no_registry=import_registry or self.federated_only)

        # Success
        message = "Created nucypher installation files at {}".format(
            self.config_root)
        self.log.debug(message)
        return self.config_root

    def attach_keyring(self,
                       checksum_address: str = None,
                       *args,
                       **kwargs) -> None:
        """Attach a NucypherKeyring to this configuration.

        Uses *checksum_address* when given, otherwise falls back to the
        configured ``checksum_public_address``.  A no-op when a keyring for
        the same account is already attached.
        """
        account = checksum_address or self.checksum_public_address

        if self.keyring is not NO_KEYRING_ATTACHED:
            if self.keyring.checksum_address != account:
                raise self.ConfigurationError(
                    "There is already a keyring attached to this configuration."
                )
            # Same account already attached: nothing to do
            return

        if account is None:
            raise self.ConfigurationError(
                "No account specified to unlock keyring")

        self.keyring = NucypherKeyring(*args,
                                       keyring_root=self.keyring_dir,  # type: str
                                       account=account,  # type: str
                                       **kwargs)

    def write_keyring(self, password: str,
                      **generation_kwargs) -> NucypherKeyring:
        """Generate and attach a fresh keyring, adopting its address.

        :param password: Encryption password for the new keyring.
        :param generation_kwargs: Forwarded to ``NucypherKeyring.generate``.
        :return: The newly generated keyring.
        """
        keyring = NucypherKeyring.generate(password=password,
                                           keyring_root=self.keyring_dir,
                                           **generation_kwargs)
        self.keyring = keyring
        # Operating mode switch TODO: #466
        self.checksum_public_address = (keyring.federated_address
                                        if self.federated_only
                                        else keyring.checksum_address)
        return keyring

    def write_registry(self,
                       output_filepath: str = None,
                       source: str = None,
                       force: bool = False,
                       blank=False) -> str:
        """Write the contract registry file.

        :param output_filepath: Destination path; defaults to self.registry_filepath.
        :param source: Source registry to validate and copy; defaults to
                       self.REGISTRY_SOURCE.
        :param force: Overwrite an existing file at the destination.
        :param blank: Write an empty file instead of copying the source.
        :return: The path the registry was written to.
        """
        # Resolve defaults *before* any filesystem checks.  (Previously the
        # existence check ran on the raw argument, so force=True with
        # output_filepath=None crashed in os.path.isfile(None).)
        output_filepath = output_filepath or self.registry_filepath
        source = source or self.REGISTRY_SOURCE

        # Refuse to clobber an existing registry unless explicitly forced.
        # (The original condition was inverted: it raised only when
        # force=True, making `force` refuse instead of permit overwriting.)
        if not force and os.path.isfile(output_filepath):
            raise self.ConfigurationError(
                'There is an existing file at the registry output_filepath {}'.
                format(output_filepath))

        if not blank and not self.dev_mode:
            # Validate the source registry, then actually write it out
            with open(source, 'r') as registry_file:
                registry_data = registry_file.read()
            try:
                json.loads(registry_data)
            except JSONDecodeError:
                message = "The registry source {} is not valid JSON".format(
                    source)
                self.log.critical(message)
                raise self.ConfigurationError(message)
            self.log.debug("Source registry {} is valid JSON".format(source))
            # BUG FIX: the validated registry was previously never written to
            # the destination even though success was logged below.
            with open(output_filepath, 'w') as destination_file:
                destination_file.write(registry_data)

        else:
            self.log.warn("Writing blank registry")
            open(output_filepath, 'w').close()  # write blank

        self.log.debug(
            "Successfully wrote registry to {}".format(output_filepath))
        return output_filepath
# Beispiel #31  (scraped-example separator; "Beispiel" is German for "example")
# 0  (example rating artifact from the scrape)
def cli():
    """Entry point: parse arguments, resolve Nest credentials, start the reactor.

    Credentials may come from a secrets directory (files named "username" and
    "password") and/or from explicit command-line flags; the flags win.
    """
    log = Logger()

    parser = argparse.ArgumentParser(prog=__version__.package)
    parser.add_argument('--version',
                        action='version',
                        version=__version__.public())
    parser.add_argument(
        '--secrets',
        help=
        'Directory that contains files named "username" and "password". These files should contain your Nest username and password.'
    )
    parser.add_argument('--username', help='Your Nest username.')
    parser.add_argument('--password', help='Your Nest password.')
    parser.add_argument(
        '--endpoint',
        default=default_endpoint,
        help=
        'Twisted endpoint declaration for internal web service. Default is "{}".'
        .format(default_endpoint))
    args = parser.parse_args()

    # Route twisted log events to stderr
    globalLogBeginner.beginLoggingTo([textFileLogObserver(sys.stderr)])

    username = None
    password = None

    if args.secrets is not None:
        if not os.path.isdir(args.secrets):
            log.critical('{secrets:} is not a directory', secrets=args.secrets)
            sys.exit(1)

        username_path = os.path.join(args.secrets, 'username')
        password_path = os.path.join(args.secrets, 'password')

        # A missing or unreadable secrets file is logged but not fatal here;
        # the credential checks below decide whether to abort.
        try:
            with open(username_path, mode='r', encoding='utf-8') as fh:
                username = fh.read().strip()
        except FileNotFoundError:
            log.error(
                'Secrets path specified but username file {username_file:} not found!',
                username_file=username_path)
        except PermissionError:
            log.error(
                'Unable to open username file {username_file:} for reading!',
                username_file=username_path)

        try:
            with open(password_path, mode='r', encoding='utf-8') as fh:
                password = fh.read().strip()
        except FileNotFoundError:
            log.error(
                'Secrets path specified but password file {password_file:} not found!',
                password_file=password_path)
        except PermissionError:
            log.error(
                'Unable to open password file {password_file:} for reading!',
                password_file=password_path)

    # Explicit command-line credentials take precedence over secrets files
    if args.username is not None:
        username = args.username
    if args.password is not None:
        password = args.password

    if username is None:
        log.critical('Username must be specified!')
        sys.exit(1)
    if password is None:
        log.critical('Password must be specified!')
        sys.exit(1)

    app = Main(reactor, username, password, args.endpoint)
    reactor.run()