Example #1
def start_upload_server():
	import argparse

	from twisted.internet import reactor
	from twisted.logger import Logger, globalLogPublisher, STDLibLogObserver
	from twisted.web.server import Site
	from twisted.web.resource import Resource

	from cheesepi.server.upload import UploadHandler

	# Argument parsing
	parser = argparse.ArgumentParser()
	parser.add_argument('--port', type=int, default=18090,
	                    help='Port to listen on')
	args = parser.parse_args()

	init_logging()

	# Make Twisted logging write to Python's logging module
	globalLogPublisher.addObserver(STDLibLogObserver(name="cheesepi.server.upload"))

	# Use the Twisted logger when running under Twisted
	log = Logger()

	root = Resource()
	root.putChild(b"upload", UploadHandler())  # putChild expects a bytes path segment on Python 3
	upload_server = Site(root)

	reactor.listenTCP(args.port, upload_server)
	log.info("Starting upload server on port %d..." % args.port)
	reactor.run()
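start_upload_server() calls init_logging(), which is defined elsewhere in cheesepi and not shown here. A minimal stand-in, assuming plain stdlib logging is acceptable, might look like:

import logging

def init_logging():
    # Hypothetical stand-in: route records to stderr at INFO level so the
    # STDLibLogObserver bridge above has somewhere to deliver events
    logging.basicConfig(level=logging.INFO,
                        format="%(asctime)s %(name)s %(levelname)s %(message)s")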
Example #2
class LoggingProcessProtocol(ProcessProtocol, object):
    """
    A ProcessProtocol that logs all output to a file
    """
    def __init__(self, commandname, maxbackups=3):
        log_name = commandname + ".log"
        log_dir = os.path.join(fs.adirs.user_log_dir, "processes")
        if not os.path.isdir(log_dir):
            os.makedirs(log_dir)
        log_name = os.path.join(log_dir, log_name)
        _backup_logs(log_name, maxbackups)
        self.log = Logger(observer=textFileLogObserver(io.open(log_name, "w")),
                          namespace="")
        super(LoggingProcessProtocol, self).__init__()

    def connectionMade(self):
        self.finished = defer.Deferred()

    def outReceived(self, data):
        self.log.info("{data}", data=bytes_to_str(data.strip()))

    def errReceived(self, data):
        self.log.error("{data}", data=bytes_to_str(data.strip()))

    def processEnded(self, reason):
        if reason.check(ProcessDone):
            self.finished.callback(True)
            self.log.info("Process finished without error")
        else:
            self.finished.errback(reason)
            self.log.error("Process ended with error: {reason!r}",
                           reason=reason)
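A short usage sketch for the protocol above; the executable path is illustrative, and fs, bytes_to_str and _backup_logs are helpers from the snippet's home module:

from twisted.internet import reactor

protocol = LoggingProcessProtocol("ls")
# spawnProcess starts the child immediately; its stdout/stderr then flow
# into the per-command log file via outReceived/errReceived
reactor.spawnProcess(protocol, "/bin/ls", args=["/bin/ls", "-l"])
protocol.finished.addBoth(lambda _: reactor.stop())
reactor.run()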
Example #3
def start_control_server():
	import argparse

	from twisted.internet import reactor
	from twisted.logger import Logger, globalLogPublisher, STDLibLogObserver

	from cheesepi.server.control import (CheeseRPCServerFactory,
	                                     CheeseRPCServer)
	from cheesepi.server.storage.mongo import MongoDAO

	# Argument parsing
	parser = argparse.ArgumentParser()
	parser.add_argument('--port', type=int, default=18080,
	                    help='Port to listen on')
	args = parser.parse_args()

	init_logging()

	# Make Twisted logging write to Python's logging module
	globalLogPublisher.addObserver(STDLibLogObserver(name="cheesepi.server.control"))

	# Use the Twisted logger when running under Twisted
	log = Logger()

	dao = MongoDAO('localhost', 27017)
	control_server = CheeseRPCServer(dao).getStreamFactory(CheeseRPCServerFactory)

	reactor.listenTCP(args.port, control_server)
	log.info("Starting control server on port %d..." % args.port)
	reactor.run()
Example #4
    def test_logger_namespace(self):
        """
        A `twisted.logger.Logger` with a namespace gets that namespace as a prefix.
        """
        fout = StringIO()
        log = Logger(namespace="ns", observer=FileLogObserver(fout, formatForSystemd))

        log.info("info\n{more}", more="info")
        log.error("err")

        self.assertEqual((
            "<6>[ns] info\n"
            "<6>  info\n"
            "<3>[ns] err\n"
        ), fout.getvalue())
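formatForSystemd is not shown in this example. Judging from the assertions above, it prefixes each output line with an sd-daemon priority tag (<6> for info, <3> for error) plus the logger namespace on the first line; a rough stand-in, not the test suite's actual helper, might be:

from twisted.logger import LogLevel, formatEvent

def formatForSystemd(event):
    # Sketch: only info vs. error priorities are distinguished here
    prefix = "<3>" if event.get("log_level") is LogLevel.error else "<6>"
    lines = formatEvent(event).split("\n")
    head = "{}[{}] {}\n".format(prefix, event.get("log_namespace", ""), lines[0])
    tail = "".join("{}  {}\n".format(prefix, line) for line in lines[1:])
    return head + tail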
Example #5
    def _relaying_test(self, eliot_logger, observer):
        """
        Publish an event using ``twisted.logger`` with ``observer`` hooked up and
        assert that the event ends up being seen by ``eliot_logger``.
        """
        twisted_logger = TwistedLogger(observer=observer)
        twisted_logger.info("Hello, world.")

        [event] = eliot_logger.messages
        self.assertThat(
            event,
            ContainsDict(dict(
                # A couple things from the Twisted side of the fence.
                log_namespace=Equals("lae_util.test.test_eliot"),
                log_format=Equals("Hello, world."),
                # And also some Eliot stuff.
                task_uuid=IsInstance(unicode),
                task_level=IsInstance(list),
            )),
        )
Example #6
class MFTests(object):

    def __init__(self):
        self.log = Logger()

    def start_static(self):
        resource = File(os.getcwd() + '/tests/pages')
        factory = Site(resource)
        endpoint = endpoints.TCP4ServerEndpoint(reactor, 0)
        endpoint.listen(factory)
        # reactor.run()

    def send_request(self):
        pass

    def stop_callback(self, none):
        reactor.stop()

    def test_log_handler(self):
        handler = LogHandler()
        self.log.info('Test msg with {parameter} is OK', parameter="value")
        self.log.error('Test error with {parameter} is OK', parameter="value")
        self.log.error('Test error with {parameter} (isError={isError}) is OK', parameter="value", isError=False)
        self.log.error('Test error with {parameter} (isError={isError}) is OK', parameter="value", isError=True)

        d = defer.Deferred()
        reactor.callLater(0, d.callback, None)
        d.addCallback(self.stop_callback)
        d.addErrback(lambda err: print("callback error: %s\ncallback traceback: %s" % (err.getErrorMessage(), err.getTraceback())))

        reactor.run()

    def test_server(self):
        d = defer.Deferred()
        reactor.callLater(3, d.callback, None)
        d.addCallback(self.stop_callback)
        #d.addCallback(self.send_request)
        d.addErrback(lambda err: print("callback error: %s\ncallback traceback: %s" % (err.getErrorMessage(), err.getTraceback())))

        Server(port=1234, db_creds=None, snapshot_dir='~/tmp', user_agent='', debug=False).run()
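Note that a Twisted reactor cannot be restarted, so calling reactor.run() from more than one test method in the same process will fail on the second call. For one-shot runs, twisted.internet.task.react is a safer pattern (a sketch, not part of the original tests):

from twisted.internet import task

def main(reactor):
    # react() runs the reactor, waits for this Deferred, then stops it cleanly
    return task.deferLater(reactor, 3, lambda: None)

task.react(main)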
Example #7
class Web3Client:

    is_local = False

    GETH = 'Geth'
    PARITY = 'Parity'
    ALT_PARITY = 'Parity-Ethereum'
    GANACHE = 'EthereumJS TestRPC'
    ETHEREUM_TESTER = 'EthereumTester'  # (PyEVM)
    SYNC_TIMEOUT_DURATION = 60  # seconds to wait for various blockchain syncing endeavors
    PEERING_TIMEOUT = 30
    SYNC_SLEEP_DURATION = 5

    class ConnectionNotEstablished(RuntimeError):
        pass

    class SyncTimeout(RuntimeError):
        pass

    def __init__(self, w3, node_technology: str, version: str, platform: str,
                 backend: str):

        self.w3 = w3
        self.node_technology = node_technology
        self.node_version = version
        self.platform = platform
        self.backend = backend
        self.log = Logger(self.__class__.__name__)

    @classmethod
    def _get_variant(cls, w3):
        return cls

    @classmethod
    def from_w3(cls, w3: Web3) -> 'Web3Client':
        """

        Client version strings
        ======================

        Geth    -> 'Geth/v1.4.11-stable-fed692f6/darwin/go1.7'
        Parity  -> 'Parity-Ethereum/v2.5.1-beta-e0141f8-20190510/x86_64-linux-gnu/rustc1.34.1'
        Ganache -> 'EthereumJS TestRPC/v2.1.5/ethereum-js'
        PyEVM   -> 'EthereumTester/0.1.0b39/linux/python3.6.7'
        """
        clients = {

            # Geth
            cls.GETH: GethClient,

            # Parity
            cls.PARITY: ParityClient,
            cls.ALT_PARITY: ParityClient,

            # Test Clients
            cls.GANACHE: GanacheClient,
            cls.ETHEREUM_TESTER: EthereumTesterClient,
        }

        try:
            client_data = w3.clientVersion.split('/')
            node_technology = client_data[0]
            ClientSubclass = clients[node_technology]

        except (ValueError, IndexError):
            raise ValueError(
                f"Invalid client version string. Got '{w3.clientVersion}'")

        except KeyError:
            raise NotImplementedError(
                f'{w3.clientVersion} is not a supported ethereum client')

        client_kwargs = {
            'node_technology': node_technology,
            'version': client_data[1],
            'backend': client_data[-1],
            'platform': client_data[2]
            if len(client_data) == 4 else None  # Platform is optional
        }

        instance = ClientSubclass._get_variant(w3)(w3, **client_kwargs)
        return instance

    @property
    def peers(self):
        raise NotImplementedError

    @property
    def chain_name(self) -> str:
        if not self.is_local:
            return PUBLIC_CHAINS[int(self.chain_id)]
        name = LOCAL_CHAINS.get(self.chain_id, UNKNOWN_DEVELOPMENT_CHAIN_ID)
        return name

    @property
    def syncing(self) -> Union[bool, dict]:
        return self.w3.eth.syncing

    def lock_account(self, address) -> bool:
        if self.is_local:
            return True
        return NotImplemented

    def unlock_account(self, address, password, duration=None) -> bool:
        if self.is_local:
            return True
        return NotImplemented

    @property
    def is_connected(self):
        return self.w3.isConnected()

    @property
    def etherbase(self) -> str:
        return self.w3.eth.accounts[0]

    @property
    def accounts(self):
        return self.w3.eth.accounts

    def get_balance(self, account):
        return self.w3.eth.getBalance(account)

    def inject_middleware(self, middleware, **kwargs):
        self.w3.middleware_onion.inject(middleware, **kwargs)

    @property
    def chain_id(self) -> int:
        try:
            # from hex-str
            return int(self.w3.eth.chainId, 16)
        except TypeError:
            # from str
            return int(self.w3.eth.chainId)

    @property
    def net_version(self) -> int:
        return int(self.w3.net.version)

    def get_contract(self, **kwargs):
        return self.w3.eth.contract(**kwargs)

    @property
    def gas_price(self):
        return self.w3.eth.gasPrice

    @property
    def block_number(self) -> int:
        return self.w3.eth.blockNumber

    @property
    def coinbase(self) -> str:
        return self.w3.eth.coinbase

    def wait_for_receipt(self, transaction_hash: str, timeout: int) -> dict:
        receipt = self.w3.eth.waitForTransactionReceipt(
            transaction_hash=transaction_hash, timeout=timeout)
        return receipt

    def sign_transaction(self, transaction: dict):
        raise NotImplementedError

    def get_transaction(self, transaction_hash) -> str:
        return self.w3.eth.getTransaction(transaction_hash=transaction_hash)

    def send_transaction(self, transaction: dict) -> str:
        return self.w3.eth.sendTransaction(transaction=transaction)

    def send_raw_transaction(self, transaction: bytes) -> str:
        return self.w3.eth.sendRawTransaction(raw_transaction=transaction)

    def sign_message(self, account: str, message: bytes) -> str:
        """
        Calls the appropriate signing function for the specified account on the
        backend. If the backend is based on eth-tester, then it uses the
        eth-tester signing interface to do so.
        """
        return self.w3.eth.sign(account, data=message)

    def _has_latest_block(self):
        # check that our local chain data is up to date
        return (time.time() - self.w3.eth.getBlock(
            self.w3.eth.blockNumber)['timestamp']) < 30

    def sync(self, timeout: int = 120, quiet: bool = False):

        # Provide compatibility with local chains
        if self.is_local:
            return

        # Record start time for timeout calculation
        now = maya.now()
        start_time = now

        def check_for_timeout(t):
            last_update = maya.now()
            duration = (last_update - start_time).total_seconds()
            if duration > t:
                raise self.SyncTimeout

        while not self._has_latest_block():
            # Check for ethereum peers
            self.log.info(
                f"Waiting for Ethereum peers ({len(self.peers)} known)")
            while not self.peers:
                time.sleep(0)
                check_for_timeout(t=self.PEERING_TIMEOUT)

            # Wait for sync start
            self.log.info(
                f"Waiting for {self.chain_name.capitalize()} chain synchronization to begin"
            )
            while not self.syncing:
                time.sleep(0)
                check_for_timeout(t=self.SYNC_TIMEOUT_DURATION * 2)

            while True:
                #  TODO:  Should this timeout eventually?
                syncdata = self.syncing
                if not syncdata:
                    return False

                self.log.info(
                    f"Syncing {syncdata['currentBlock']}/{syncdata['highestBlock']}"
                )
                time.sleep(self.SYNC_SLEEP_DURATION)
                yield syncdata

        return True
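A brief usage sketch for the factory and the sync generator above; the endpoint URL is illustrative, and GethClient and the other subclasses are assumed to be defined alongside this class:

from web3 import Web3, HTTPProvider

w3 = Web3(HTTPProvider("http://localhost:8545"))  # hypothetical endpoint
client = Web3Client.from_w3(w3)  # dispatches on w3.clientVersion
for syncdata in client.sync():
    # sync() yields progress dicts until the local chain has the latest block
    print(syncdata['currentBlock'], syncdata['highestBlock'])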
Example #8
class BlockchainInterface:
    """
    Interacts with a solidity compiler and a registry in order to instantiate compiled
    ethereum contracts with the given web3 provider backend.
    """

    TIMEOUT = 180  # seconds
    NULL_ADDRESS = '0x' + '0' * 40

    process = NO_PROVIDER_PROCESS.bool_value(False)
    Web3 = Web3

    _contract_factory = VersionedContract

    class InterfaceError(Exception):
        pass

    class NoProvider(InterfaceError):
        pass

    class UnsupportedProvider(InterfaceError):
        pass

    class ConnectionFailed(InterfaceError):
        pass

    class UnknownContract(InterfaceError):
        pass

    def __init__(self,
                 poa: bool = True,
                 light: bool = False,
                 provider_process: NuCypherGethProcess = NO_PROVIDER_PROCESS,
                 provider_uri: str = NO_BLOCKCHAIN_CONNECTION,
                 provider: Web3Providers = NO_BLOCKCHAIN_CONNECTION):
        """
        A blockchain "network interface"; The circumflex wraps entirely around the bounds of
        contract operations including compilation, deployment, and execution.

        TODO: #1502 - Move me to docs.

         Filesystem          Configuration           Node              Client                  EVM
        ================ ====================== =============== =====================  ===========================

         Solidity Files -- SolidityCompiler -                      --- HTTPProvider ------ ...
                                            |                    |
                                            |                    |
                                            |                    |
                                            - *BlockchainInterface* -- IPCProvider ----- External EVM (geth, parity...)
                                                       |         |
                                                       |         |
                                                 TestProvider ----- EthereumTester -------------
                                                                                                |
                                                                                                |
                                                                                        PyEVM (Development Chain)

         ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

         Runtime Files --                 --BlockchainInterface ----> Registry
                        |                |             ^
                        |                |             |
                        |                |             |
         Key Files ------ CharacterConfiguration     Agent                          ... (Contract API)
                        |                |             ^
                        |                |             |
                        |                |             |
                        |                |           Actor                          ... (Blockchain-Character API)
                        |                |             ^
                        |                |             |
                        |                |             |
         Config File ---                  --------- Character                       ... (Public API)
                                                       ^
                                                       |
                                                     Human


        The Blockchain is the junction of the solidity compiler, a contract registry, and a collection of
        web3 network providers as a means of interfacing with the ethereum blockchain to execute
        or deploy contract code on the network.


        Compiler and Registry Usage
        -----------------------------

        Contracts are freshly re-compiled if an instance of SolidityCompiler is passed; otherwise,
        the registry will read contract data saved to disk, which is used to retrieve contract addresses and op-codes.
        Optionally, a registry instance can be passed instead.


        Provider Usage
        ---------------
        https://github.com/ethereum/eth-tester#available-backends


        * HTTP Provider - Web3 HTTP provider, typically JSON RPC 2.0 over HTTP
        * Websocket Provider - Web3 WS provider, typically JSON RPC 2.0 over WS, supply endpoint uri and websocket=True
        * IPC Provider - Web3 File based IPC provider transported over standard I/O
        * Custom Provider - A pre-initialized web3.py provider instance to attach to this interface

        """

        self.log = Logger('Blockchain')
        self.poa = poa
        self.provider_uri = provider_uri
        self._provider = provider
        self._provider_process = provider_process
        self.w3 = NO_BLOCKCHAIN_CONNECTION
        self.client = NO_BLOCKCHAIN_CONNECTION  # type: Web3Client
        self.transacting_power = READ_ONLY_INTERFACE
        self.is_light = light

    def __repr__(self):
        r = '{name}({uri})'.format(name=self.__class__.__name__,
                                   uri=self.provider_uri)
        return r

    @classmethod
    def from_dict(cls, payload: dict, **overrides) -> 'BlockchainInterface':
        payload.update({k: v for k, v in overrides.items() if v is not None})
        blockchain = cls(**payload)
        return blockchain

    def to_dict(self) -> dict:
        payload = dict(provider_uri=self.provider_uri,
                       poa=self.poa,
                       light=self.is_light)
        return payload

    @property
    def is_connected(self) -> bool:
        """
        https://web3py.readthedocs.io/en/stable/providers.html#examples-using-automated-detection
        """
        if self.client is NO_BLOCKCHAIN_CONNECTION:
            return False
        return self.client.is_connected

    def attach_middleware(self):

        # For use with Proof-Of-Authority test-blockchains
        if self.poa is True:
            self.log.debug('Injecting POA middleware at layer 0')
            self.client.inject_middleware(geth_poa_middleware, layer=0)

    def connect(self):

        # Spawn child process
        if self._provider_process:
            self._provider_process.start()
            provider_uri = self._provider_process.provider_uri(scheme='file')
        else:
            provider_uri = self.provider_uri
            self.log.info(
                f"Using external Web3 Provider '{self.provider_uri}'")

        # Attach Provider
        self._attach_provider(provider=self._provider,
                              provider_uri=provider_uri)
        self.log.info("Connecting to {}".format(self.provider_uri))
        if self._provider is NO_BLOCKCHAIN_CONNECTION:
            raise self.NoProvider(
                "There are no configured blockchain providers")

        # Connect if not connected
        try:
            self.w3 = self.Web3(provider=self._provider)
            self.client = Web3Client.from_w3(w3=self.w3)
        except requests.ConnectionError:  # RPC
            raise self.ConnectionFailed(
                f'Connection Failed - {str(self.provider_uri)} - is RPC enabled?'
            )
        except FileNotFoundError:  # IPC File Protocol
            raise self.ConnectionFailed(
                f'Connection Failed - {str(self.provider_uri)} - is IPC enabled?'
            )
        else:
            self.attach_middleware()

        return self.is_connected

    def sync(self, emitter=None) -> None:

        sync_state = self.client.sync()
        if emitter is not None:

            emitter.echo(
                f"Syncing: {self.client.chain_name.capitalize()}. Waiting for sync to begin.",
                verbosity=1)

            while not len(self.client.peers):
                emitter.echo("waiting for peers...", verbosity=1)
                time.sleep(5)

            peer_count = len(self.client.peers)
            emitter.echo(
                f"Found {'an' if peer_count == 1 else peer_count} Ethereum peer{('s' if peer_count > 1 else '')}.",
                verbosity=1)

            try:
                emitter.echo("Beginning sync...", verbosity=1)
                initial_state = next(sync_state)
            except StopIteration:  # will occur if no syncing needs to happen
                emitter.echo("Local blockchain data is already synced.",
                             verbosity=1)
                return

            prior_state = initial_state
            total_blocks_to_sync = int(initial_state.get(
                'highestBlock', 0)) - int(initial_state.get('currentBlock', 0))
            with click.progressbar(
                    length=total_blocks_to_sync,
                    label="sync progress",
                    file=emitter.get_stream(verbosity=1)) as bar:
                for syncdata in sync_state:
                    if syncdata:
                        blocks_accomplished = int(
                            syncdata['currentBlock']) - int(
                                prior_state.get('currentBlock', 0))
                        bar.update(blocks_accomplished)
                        prior_state = syncdata
        else:
            try:
                for syncdata in sync_state:
                    self.client.log.info(
                        f"Syncing {syncdata['currentBlock']}/{syncdata['highestBlock']}"
                    )
            except TypeError:  # it's already synced
                return
        return

    @property
    def provider(self) -> Union[IPCProvider, WebsocketProvider, HTTPProvider]:
        return self._provider

    def _attach_provider(self,
                         provider: Web3Providers = None,
                         provider_uri: str = None) -> None:
        """
        https://web3py.readthedocs.io/en/latest/providers.html#providers
        """

        if not provider_uri and not provider:
            raise self.NoProvider("No URI or provider instances supplied.")

        if provider_uri and not provider:
            uri_breakdown = urlparse(provider_uri)

            if uri_breakdown.scheme == 'tester':
                providers = {
                    'pyevm': _get_tester_pyevm,
                    'geth': _get_test_geth_parity_provider,
                    'parity-ethereum': _get_test_geth_parity_provider,
                }
                provider_scheme = uri_breakdown.netloc

            else:
                providers = {
                    'auto': _get_auto_provider,
                    'infura': _get_infura_provider,
                    'ipc': _get_IPC_provider,
                    'file': _get_IPC_provider,
                    'ws': _get_websocket_provider,
                    'http': _get_HTTP_provider,
                    'https': _get_HTTP_provider,
                }
                provider_scheme = uri_breakdown.scheme

            # auto-detect for file based ipc
            if not provider_scheme:
                if os.path.exists(provider_uri):
                    # file is available - assume ipc/file scheme
                    provider_scheme = 'file'
                    self.log.info(
                        f"Auto-detected provider scheme as 'file://' for provider {provider_uri}"
                    )

            try:
                self._provider = providers[provider_scheme](provider_uri)
            except KeyError:
                raise self.UnsupportedProvider(
                    f"{provider_uri} is an invalid or unsupported blockchain provider URI"
                )
            else:
                self.provider_uri = provider_uri or NO_BLOCKCHAIN_CONNECTION
        else:
            self._provider = provider

    @validate_checksum_address
    def send_transaction(
        self,
        contract_function: ContractFunction,
        sender_address: str,
        payload: dict = None,
        transaction_gas_limit: int = None,
    ) -> dict:

        if self.transacting_power is READ_ONLY_INTERFACE:
            raise self.InterfaceError(str(READ_ONLY_INTERFACE))

        #
        # Build
        #

        if not payload:
            payload = {}

        nonce = self.client.w3.eth.getTransactionCount(sender_address,
                                                       'pending')
        payload.update({
            'chainId': int(self.client.chain_id),
            'nonce': nonce,
            'from': sender_address,
            'gasPrice': self.client.gas_price
        })

        if transaction_gas_limit:
            payload['gas'] = int(transaction_gas_limit)

        # Get interface name
        deployment = isinstance(contract_function, ContractConstructor)

        try:
            transaction_name = contract_function.fn_name.upper()
        except AttributeError:
            if deployment:
                transaction_name = 'DEPLOY'
            else:
                transaction_name = 'UNKNOWN'

        payload_pprint = dict(payload)
        payload_pprint['from'] = to_checksum_address(payload['from'])
        payload_pprint = ', '.join("{}: {}".format(k, v)
                                   for k, v in payload_pprint.items())
        self.log.debug(f"[TX-{transaction_name}] | {payload_pprint}")

        # Build transaction payload
        try:
            unsigned_transaction = contract_function.buildTransaction(payload)
        except (ValidationError, ValueError) as e:
            # TODO: #1504 - Handle validation failures for gas limits, invalid fields, etc.
            # Note: Geth raises ValueError in the same condition that pyevm raises ValidationError here.
            # Treat this condition as "Transaction Failed".
            error = str(e).replace("{", "").replace("}", "")  # See #724
            self.log.critical(f"Validation error: {error}")
            raise
        else:
            if deployment:
                self.log.info(
                    f"Deploying contract: {len(unsigned_transaction['data'])} bytes"
                )

        #
        # Broadcast
        #

        signed_raw_transaction = self.transacting_power.sign_transaction(
            unsigned_transaction)
        txhash = self.client.send_raw_transaction(signed_raw_transaction)

        try:
            receipt = self.client.wait_for_receipt(txhash,
                                                   timeout=self.TIMEOUT)
        except TimeExhausted:
            # TODO: #1504 - Handle transaction timeout
            raise
        else:
            self.log.debug(
                f"[RECEIPT-{transaction_name}] | txhash: {receipt['transactionHash'].hex()}"
            )

        #
        # Confirm
        #

        # Primary check
        deployment_status = receipt.get('status', UNKNOWN_TX_STATUS)
        if deployment_status == 0:
            failure = f"Transaction transmitted, but receipt returned status code 0. " \
                      f"Full receipt: \n {pprint.pformat(receipt, indent=2)}"
            raise self.InterfaceError(failure)

        if deployment_status is UNKNOWN_TX_STATUS:
            self.log.info(
                f"Unknown transaction status for {txhash} (receipt did not contain a status field)"
            )

            # Secondary check
            tx = self.client.get_transaction(txhash)
            if tx["gas"] == receipt["gasUsed"]:
                raise self.InterfaceError(
                    f"Transaction consumed 100% of transaction gas. "
                    f"Full receipt: \n {pprint.pformat(receipt, indent=2)}")

        return receipt

    def get_contract_by_name(
        self,
        registry: BaseContractRegistry,
        contract_name: str,
        contract_version: str = None,
        enrollment_version: Union[int, str] = None,
        proxy_name: str = None,
        use_proxy_address: bool = True
    ) -> Union[VersionedContract, List[tuple]]:
        """
        Instantiate a deployed contract from registry data,
        and assimilate it with its proxy if it is upgradeable,
        or return all registered records if use_proxy_address is False.
        """
        target_contract_records = registry.search(
            contract_name=contract_name, contract_version=contract_version)

        if not target_contract_records:
            raise self.UnknownContract(
                f"No such contract records with name {contract_name}:{contract_version}."
            )

        if proxy_name:

            # Lookup proxies; Search for a published proxy that targets this contract record
            proxy_records = registry.search(contract_name=proxy_name)

            results = list()
            for proxy_name, proxy_version, proxy_address, proxy_abi in proxy_records:
                proxy_contract = self.client.w3.eth.contract(
                    abi=proxy_abi,
                    address=proxy_address,
                    version=proxy_version,
                    ContractFactoryClass=self._contract_factory)

                # Read this dispatcher's target address from the blockchain
                proxy_live_target_address = proxy_contract.functions.target(
                ).call()
                for target_name, target_version, target_address, target_abi in target_contract_records:

                    if target_address == proxy_live_target_address:
                        if use_proxy_address:
                            triplet = (proxy_address, target_version,
                                       target_abi)
                        else:
                            triplet = (target_address, target_version,
                                       target_abi)
                    else:
                        continue

                    results.append(triplet)

            if len(results) > 1:
                address, _version, _abi = results[0]
                message = "Multiple {} deployments are targeting {}".format(
                    proxy_name, address)
                raise self.InterfaceError(message)

            else:
                try:
                    selected_address, selected_version, selected_abi = results[
                        0]
                except IndexError:
                    raise self.UnknownContract(
                        f"There are no Dispatcher records targeting '{contract_name}':{contract_version}"
                    )

        else:
            # NOTE: 0 must be allowed as a valid version number
            if len(target_contract_records) != 1:
                if enrollment_version is None:
                    m = f"{len(target_contract_records)} records enrolled " \
                        f"for contract {contract_name}:{contract_version} " \
                        f"and no version index was supplied."
                    raise self.InterfaceError(m)
                enrollment_version = self.__get_enrollment_version_index(
                    name=contract_name,
                    contract_version=contract_version,
                    version_index=enrollment_version,
                    enrollments=len(target_contract_records))

            else:
                enrollment_version = -1  # default

            _contract_name, selected_version, selected_address, selected_abi = target_contract_records[
                enrollment_version]

        # Create the contract from selected sources
        unified_contract = self.client.w3.eth.contract(
            abi=selected_abi,
            address=selected_address,
            version=selected_version,
            ContractFactoryClass=self._contract_factory)

        return unified_contract

    @staticmethod
    def __get_enrollment_version_index(version_index: Union[int, str],
                                       enrollments: int, name: str,
                                       contract_version: str):
        version_names = {'latest': -1, 'earliest': 0}
        try:
            version = version_names[version_index]
        except KeyError:
            try:
                version = int(version_index)
            except ValueError:
                what_is_this = version_index
                raise ValueError(
                    f"'{what_is_this}' is not a valid enrollment version number"
                )
            else:
                if version > enrollments - 1:
                    message = f"Version index '{version}' is larger than the number of enrollments " \
                              f"for {name}:{contract_version}."
                    raise ValueError(message)
        return version
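A minimal connection sketch for the interface above, assuming a local HTTP RPC endpoint is listening:

interface = BlockchainInterface(provider_uri="http://localhost:8545")  # hypothetical endpoint
interface.connect()  # attaches the HTTP provider and builds the Web3Client
print(interface.client.chain_name, interface.client.block_number)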
Example #9
class CharacterConfiguration(BaseConfiguration):
    """
    'Sideways Engagement' of Character classes; a reflection of input parameters.
    """

    VERSION = 1  # bump when static payload scheme changes

    CHARACTER_CLASS = NotImplemented
    DEFAULT_CONTROLLER_PORT = NotImplemented
    DEFAULT_DOMAIN = NetworksInventory.DEFAULT
    DEFAULT_NETWORK_MIDDLEWARE = RestMiddleware
    TEMP_CONFIGURATION_DIR_PREFIX = 'tmp-nucypher'

    # Gas
    DEFAULT_GAS_STRATEGY = 'fast'

    def __init__(self,

                 # Base
                 emitter=None,
                 config_root: str = None,
                 filepath: str = None,

                 # Mode
                 dev_mode: bool = False,
                 federated_only: bool = False,

                 # Identity
                 checksum_address: str = None,
                 crypto_power: CryptoPower = None,

                 # Keyring
                 keyring: NucypherKeyring = None,
                 keyring_root: str = None,

                 # Learner
                 learn_on_same_thread: bool = False,
                 abort_on_learning_error: bool = False,
                 start_learning_now: bool = True,

                 # Network
                 controller_port: int = None,
                 domains: Set[str] = None,  # TODO: Mapping between learning domains and "registry" domains - #1580
                 interface_signature: Signature = None,
                 network_middleware: RestMiddleware = None,

                 # Node Storage
                 known_nodes: set = None,
                 node_storage: NodeStorage = None,
                 reload_metadata: bool = True,
                 save_metadata: bool = True,

                 # Blockchain
                 poa: bool = None,
                 light: bool = False,
                 sync: bool = False,
                 provider_uri: str = None,
                 provider_process=None,
                 gas_strategy: Union[Callable, str] = DEFAULT_GAS_STRATEGY,
                 signer_uri: str = None,

                 # Registry
                 registry: BaseContractRegistry = None,
                 registry_filepath: str = None):

        self.log = Logger(self.__class__.__name__)
        UNINITIALIZED_CONFIGURATION.bool_value(False)

        # Identity
        # NOTE: NodeConfigurations can only be used with Self-Characters
        self.is_me = True
        self.checksum_address = checksum_address

        # Keyring
        self.crypto_power = crypto_power
        self.keyring = keyring or NO_KEYRING_ATTACHED
        self.keyring_root = keyring_root or UNINITIALIZED_CONFIGURATION

        # Contract Registry
        if registry and registry_filepath:
            if registry.filepath != registry_filepath:
                error = f"Inconsistent registry filepaths for '{registry.filepath}' and '{registry_filepath}'."
                raise ValueError(error)
            else:
                self.log.warn(f"Registry and registry filepath were both passed.")
        self.registry = registry or NO_BLOCKCHAIN_CONNECTION.bool_value(False)
        self.registry_filepath = registry_filepath or UNINITIALIZED_CONFIGURATION

        # Blockchain
        self.poa = poa
        self.is_light = light
        self.provider_uri = provider_uri or NO_BLOCKCHAIN_CONNECTION
        self.provider_process = provider_process or NO_BLOCKCHAIN_CONNECTION
        self.signer_uri = signer_uri or NO_BLOCKCHAIN_CONNECTION

        # Learner
        self.federated_only = federated_only
        self.domains = domains or {self.DEFAULT_DOMAIN}
        self.learn_on_same_thread = learn_on_same_thread
        self.abort_on_learning_error = abort_on_learning_error
        self.start_learning_now = start_learning_now
        self.save_metadata = save_metadata
        self.reload_metadata = reload_metadata
        self.known_nodes = known_nodes or set()  # handpicked

        # Configuration
        self.__dev_mode = dev_mode
        self.config_file_location = filepath or UNINITIALIZED_CONFIGURATION
        self.config_root = UNINITIALIZED_CONFIGURATION

        #
        # Federated vs. Blockchain arguments consistency
        #

        #
        # Federated
        #

        if self.federated_only:
            # Check for incompatible values
            blockchain_args = {'filepath': registry_filepath,
                               'poa': poa,
                               'provider_process': provider_process,
                               'provider_uri': provider_uri,
                               'gas_strategy': gas_strategy}
            if any(blockchain_args.values()):
                bad_args = ", ".join(f"{arg}={val}" for arg, val in blockchain_args.items() if val)
                self.log.warn(f"Arguments {bad_args} are incompatible with federated_only. "
                              f"Overridden with a sane default.")

                # Clear decentralized attributes to ensure consistency with a
                # federated configuration.
                self.poa = False
                self.is_light = False
                self.provider_uri = None
                self.provider_process = None
                self.registry_filepath = None
                self.gas_strategy = None

        #
        # Decentralized
        #

        else:
            self.gas_strategy = gas_strategy
            is_initialized = BlockchainInterfaceFactory.is_interface_initialized(provider_uri=self.provider_uri)
            if not is_initialized and provider_uri:
                BlockchainInterfaceFactory.initialize_interface(provider_uri=self.provider_uri,
                                                                poa=self.poa,
                                                                light=self.is_light,
                                                                provider_process=self.provider_process,
                                                                sync=sync,
                                                                emitter=emitter,
                                                                gas_strategy=gas_strategy)
            else:
                self.log.warn(f"Using existing blockchain interface connection ({self.provider_uri}).")

            if not self.registry:
                # TODO: These two code blocks are untested.
                if not self.registry_filepath:  # TODO: Registry URI  (goerli://speedynet.json) :-)
                    self.log.info(f"Fetching latest registry from source.")
                    self.registry = InMemoryContractRegistry.from_latest_publication(network=list(self.domains)[0])  # TODO: #1580
                else:
                    self.registry = LocalContractRegistry(filepath=self.registry_filepath)
                    self.log.info(f"Using local registry ({self.registry}).")

        if dev_mode:
            self.__temp_dir = UNINITIALIZED_CONFIGURATION
            self.__setup_node_storage()
            self.initialize(password=DEVELOPMENT_CONFIGURATION)
        else:
            self.__temp_dir = LIVE_CONFIGURATION
            self.config_root = config_root or self.DEFAULT_CONFIG_ROOT
            self._cache_runtime_filepaths()
            self.__setup_node_storage(node_storage=node_storage)

        # Network
        self.controller_port = controller_port or self.DEFAULT_CONTROLLER_PORT
        self.network_middleware = network_middleware or self.DEFAULT_NETWORK_MIDDLEWARE(registry=self.registry)
        self.interface_signature = interface_signature

        super().__init__(filepath=self.config_file_location, config_root=self.config_root)

    def __call__(self, **character_kwargs):
        return self.produce(**character_kwargs)

    def update(self, **kwargs) -> None:
        """
        A facility for updating existing attributes on existing configuration instances.

        Warning: This method allows mutation and may result in an inconsistent configuration.
        """
        return super().update(modifier=self.checksum_address, filepath=self.config_file_location, **kwargs)

    @classmethod
    def generate(cls, password: str, *args, **kwargs):
        """Shortcut: Hook-up a new initial installation and write configuration file to the disk"""
        node_config = cls(dev_mode=False, *args, **kwargs)
        node_config.initialize(password=password)
        node_config.to_configuration_file()
        return node_config

    def cleanup(self) -> None:
        if self.__dev_mode:
            self.__temp_dir.cleanup()

    @property
    def dev_mode(self) -> bool:
        return self.__dev_mode

    def __setup_node_storage(self, node_storage=None) -> None:
        if self.dev_mode:
            node_storage = ForgetfulNodeStorage(registry=self.registry, federated_only=self.federated_only)
        elif not node_storage:
            node_storage = LocalFileBasedNodeStorage(registry=self.registry,
                                                     config_root=self.config_root,
                                                     federated_only=self.federated_only)
        self.node_storage = node_storage

    def forget_nodes(self) -> None:
        self.node_storage.clear()
        message = "Removed all stored node node metadata and certificates"
        self.log.debug(message)

    def destroy(self) -> None:
        """Parse a node configuration and remove all associated files from the filesystem"""
        self.attach_keyring()
        self.keyring.destroy()
        os.remove(self.config_file_location)

    def generate_parameters(self, **overrides) -> dict:
        """
        Warning: This method allows mutation and may result in an inconsistent configuration.
        """
        merged_parameters = {**self.static_payload(), **self.dynamic_payload, **overrides}
        non_init_params = ('config_root',
                           'poa',
                           'light',
                           'provider_uri',
                           'registry_filepath',
                           'gas_strategy',
                           'signer_uri')
        character_init_params = filter(lambda t: t[0] not in non_init_params, merged_parameters.items())
        return dict(character_init_params)

    def produce(self, **overrides) -> CHARACTER_CLASS:
        """Initialize a new character instance and return it."""
        merged_parameters = self.generate_parameters(**overrides)
        character = self.CHARACTER_CLASS(**merged_parameters)
        return character

    @classmethod
    def assemble(cls, filepath: str = None, **overrides) -> dict:
        """
        Warning: This method allows mutation and may result in an inconsistent configuration.
        """
        payload = cls._read_configuration_file(filepath=filepath)
        node_storage = cls.load_node_storage(storage_payload=payload['node_storage'],
                                             federated_only=payload['federated_only'])
        domains = set(payload['domains'])

        # Assemble
        payload.update(dict(node_storage=node_storage, domains=domains))
        # Filter out None values from **overrides to detect, well, overrides...
        # Acts as a shim for optional CLI flags.
        overrides = {k: v for k, v in overrides.items() if v is not None}
        payload = {**payload, **overrides}
        return payload

    @classmethod
    def from_configuration_file(cls,
                                filepath: str = None,
                                provider_process=None,
                                **overrides  # < ---- Inlet for CLI Flags
                                ) -> 'CharacterConfiguration':
        """Initialize a CharacterConfiguration from a JSON file."""
        filepath = filepath or cls.default_filepath()
        assembled_params = cls.assemble(filepath=filepath, **overrides)
        try:
            node_configuration = cls(filepath=filepath, provider_process=provider_process, **assembled_params)
        except TypeError as e:
            raise cls.ConfigurationError(e)
        return node_configuration

    def validate(self) -> bool:

        # Top-level
        if not os.path.exists(self.config_root):
            raise self.ConfigurationError(f'No configuration directory found at {self.config_root}.')

        # Sub-paths
        filepaths = self.runtime_filepaths
        for field, path in filepaths.items():
            if path and not os.path.exists(path):
                message = 'Missing configuration file or directory: {}.'
                if 'registry' in path:
                    message += ' Did you mean to pass --federated-only?'
                raise CharacterConfiguration.InvalidConfiguration(message.format(path))
        return True

    def static_payload(self) -> dict:
        """Exported static configuration values for initializing Ursula"""

        payload = dict(

            # Identity
            federated_only=self.federated_only,
            checksum_address=self.checksum_address,
            keyring_root=self.keyring_root,

            # Behavior
            domains=list(self.domains),  # From Set
            learn_on_same_thread=self.learn_on_same_thread,
            abort_on_learning_error=self.abort_on_learning_error,
            start_learning_now=self.start_learning_now,
            save_metadata=self.save_metadata,
            node_storage=self.node_storage.payload(),
        )

        # Optional values (mode)
        if not self.federated_only:
            if self.provider_uri:
                if not self.signer_uri:
                    self.signer_uri = self.provider_uri
                payload.update(dict(provider_uri=self.provider_uri,
                                    poa=self.poa,
                                    light=self.is_light,
                                    signer_uri=self.signer_uri))
            if self.registry_filepath:
                payload.update(dict(registry_filepath=self.registry_filepath))

            # Gas Price
            payload.update(dict(gas_strategy=self.gas_strategy))

        # Merge with base payload
        base_payload = super().static_payload()
        base_payload.update(payload)

        return base_payload

    @property  # TODO: Graduate to a method and "derive" dynamic from static payload.
    def dynamic_payload(self) -> dict:
        """Exported dynamic configuration values for initializing Ursula"""
        payload = dict()
        if not self.federated_only:
            payload.update(dict(registry=self.registry, signer=Signer.from_signer_uri(self.signer_uri)))

        payload.update(dict(network_middleware=self.network_middleware or self.DEFAULT_NETWORK_MIDDLEWARE(),
                            known_nodes=self.known_nodes,
                            node_storage=self.node_storage,
                            crypto_power_ups=self.derive_node_power_ups()))
        return payload

    def generate_filepath(self, filepath: str = None, modifier: str = None, override: bool = False) -> str:
        modifier = modifier or self.checksum_address
        filepath = super().generate_filepath(filepath=filepath, modifier=modifier, override=override)
        return filepath

    @property
    def runtime_filepaths(self) -> dict:
        filepaths = dict(config_root=self.config_root,
                         keyring_root=self.keyring_root,
                         registry_filepath=self.registry_filepath)
        return filepaths

    @classmethod
    def generate_runtime_filepaths(cls, config_root: str) -> dict:
        """Dynamically generate paths based on configuration root directory"""
        filepaths = dict(config_root=config_root,
                         config_file_location=os.path.join(config_root, cls.generate_filename()),
                         keyring_root=os.path.join(config_root, 'keyring'))
        return filepaths

    def _cache_runtime_filepaths(self) -> None:
        """Generate runtime filepaths and cache them on the config object"""
        filepaths = self.generate_runtime_filepaths(config_root=self.config_root)
        for field, filepath in filepaths.items():
            if getattr(self, field) is UNINITIALIZED_CONFIGURATION:
                setattr(self, field, filepath)

    def attach_keyring(self, checksum_address: str = None, *args, **kwargs) -> None:
        account = checksum_address or self.checksum_address
        if not account:
            raise self.ConfigurationError("No account specified to unlock keyring")
        if self.keyring is not NO_KEYRING_ATTACHED:
            if self.keyring.checksum_address != account:
                raise self.ConfigurationError("There is already a keyring attached to this configuration.")
            return
        self.keyring = NucypherKeyring(keyring_root=self.keyring_root, account=account, *args, **kwargs)

    def derive_node_power_ups(self) -> List[CryptoPowerUp]:
        power_ups = list()
        if self.is_me and not self.dev_mode:
            for power_class in self.CHARACTER_CLASS._default_crypto_powerups:
                power_up = self.keyring.derive_crypto_power(power_class)
                power_ups.append(power_up)
        return power_ups

    def initialize(self, password: str) -> str:
        """Initialize a new configuration and write installation files to disk."""

        # Development
        if self.dev_mode:
            self.__temp_dir = TemporaryDirectory(prefix=self.TEMP_CONFIGURATION_DIR_PREFIX)
            self.config_root = self.__temp_dir.name

        # Persistent
        else:
            self._ensure_config_root_exists()
            self.write_keyring(password=password)

        self._cache_runtime_filepaths()
        self.node_storage.initialize()

        # Validate
        if not self.__dev_mode:
            self.validate()

        # Success
        message = "Created nucypher installation files at {}".format(self.config_root)
        self.log.debug(message)
        return self.config_root

    def write_keyring(self, password: str, checksum_address: str = None, **generation_kwargs) -> NucypherKeyring:

        if self.federated_only:
            checksum_address = FEDERATED_ADDRESS

        elif not checksum_address:

            # Note: It is assumed the blockchain interface is not yet connected.
            if self.provider_process:

                # Generate Geth's "datadir"
                if not os.path.exists(self.provider_process.data_dir):
                    os.mkdir(self.provider_process.data_dir)

                # Get or create wallet address
                if not self.checksum_address:
                    self.checksum_address = self.provider_process.ensure_account_exists(password=password)
                elif self.checksum_address not in self.provider_process.accounts():
                    raise self.ConfigurationError(f'Unknown Account {self.checksum_address}')

            elif not self.checksum_address:
                raise self.ConfigurationError(f'No checksum address provided for decentralized configuration.')

            checksum_address = self.checksum_address

        self.keyring = NucypherKeyring.generate(password=password,
                                                keyring_root=self.keyring_root,
                                                checksum_address=checksum_address,
                                                **generation_kwargs)

        if self.federated_only:
            self.checksum_address = self.keyring.checksum_address

        return self.keyring

    @classmethod
    def load_node_storage(cls, storage_payload: dict, federated_only: bool):
        from nucypher.config.storages import NodeStorage
        node_storage_subclasses = {storage._name: storage for storage in NodeStorage.__subclasses__()}
        storage_type = storage_payload[NodeStorage._TYPE_LABEL]
        storage_class = node_storage_subclasses[storage_type]
        node_storage = storage_class.from_payload(payload=storage_payload, federated_only=federated_only)
        return node_storage
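CharacterConfiguration is abstract: CHARACTER_CLASS and DEFAULT_CONTROLLER_PORT stay NotImplemented until a concrete subclass fills them in. A hypothetical minimal subclass (the names and port value are illustrative, not the project's actual definitions):

class AliceConfiguration(CharacterConfiguration):
    # Hypothetical subclass; the real ones live elsewhere in nucypher.config
    CHARACTER_CLASS = Alice          # assumes Alice is imported from nucypher.characters.lawful
    DEFAULT_CONTROLLER_PORT = 8151   # illustrative port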
Example #10
class LocalFileBasedNodeStorage(NodeStorage):
    _name = 'local'
    __METADATA_FILENAME_TEMPLATE = '{}.node'

    class NoNodeMetadataFileFound(FileNotFoundError, NodeStorage.UnknownNode):
        pass

    def __init__(self,
                 config_root: str = None,
                 storage_root: str = None,
                 metadata_dir: str = None,
                 certificates_dir: str = None,
                 *args, **kwargs
                 ) -> None:

        super().__init__(*args, **kwargs)
        self.log = Logger(self.__class__.__name__)

        self.root_dir = storage_root
        self.metadata_dir = metadata_dir
        self.certificates_dir = certificates_dir
        self._cache_storage_filepaths(config_root=config_root)

    @staticmethod
    def _generate_storage_filepaths(config_root: str = None,
                                    storage_root: str = None,
                                    metadata_dir: str = None,
                                    certificates_dir: str = None):

        storage_root = storage_root or os.path.join(config_root or DEFAULT_CONFIG_ROOT, 'known_nodes')
        metadata_dir = metadata_dir or os.path.join(storage_root, 'metadata')
        certificates_dir = certificates_dir or os.path.join(storage_root, 'certificates')

        payload = {'storage_root': storage_root,
                   'metadata_dir': metadata_dir,
                   'certificates_dir': certificates_dir}

        return payload

    def _cache_storage_filepaths(self, config_root: str = None):
        filepaths = self._generate_storage_filepaths(config_root=config_root,
                                                     storage_root=self.root_dir,
                                                     metadata_dir=self.metadata_dir,
                                                     certificates_dir=self.certificates_dir)
        self.root_dir = filepaths['storage_root']
        self.metadata_dir = filepaths['metadata_dir']
        self.certificates_dir = filepaths['certificates_dir']

    #
    # Certificates
    #

    @validate_checksum_address
    def __get_certificate_filename(self, checksum_address: str):
        return '{}.{}'.format(checksum_address, Encoding.PEM.name.lower())

    def __get_certificate_filepath(self, certificate_filename: str) -> str:
        return os.path.join(self.certificates_dir, certificate_filename)

    @validate_checksum_address
    def generate_certificate_filepath(self, checksum_address: str) -> str:
        certificate_filename = self.__get_certificate_filename(checksum_address)
        certificate_filepath = self.__get_certificate_filepath(certificate_filename=certificate_filename)
        return certificate_filepath

    @validate_checksum_address
    def __read_tls_public_certificate(self, filepath: str = None, checksum_address: str = None) -> Certificate:
        """Deserialize an X509 certificate from a filepath"""
        if not bool(filepath) ^ bool(checksum_address):
            raise ValueError("Either pass filepath or checksum_address; Not both.")

        if not filepath and checksum_address is not None:
            filepath = self.generate_certificate_filepath(checksum_address)

        try:
            with open(filepath, 'rb') as certificate_file:
                cert = x509.load_pem_x509_certificate(certificate_file.read(), backend=default_backend())
                return cert
        except FileNotFoundError:
            raise FileNotFoundError("No SSL certificate found at {}".format(filepath))

    #
    # Metadata
    #

    @validate_checksum_address
    def __generate_metadata_filepath(self, checksum_address: str, metadata_dir: str = None) -> str:
        metadata_path = os.path.join(metadata_dir or self.metadata_dir,
                                     self.__METADATA_FILENAME_TEMPLATE.format(checksum_address))
        return metadata_path

    def __read_metadata(self, filepath: str, federated_only: bool):
        from nucypher.characters.lawful import Ursula
        try:
            with open(filepath, "rb") as seed_file:
                seed_file.seek(0)
                node_bytes = self.deserializer(seed_file.read())
                node = Ursula.from_bytes(node_bytes, federated_only=federated_only)
        except FileNotFoundError:
            raise self.UnknownNode
        return node

    def __write_metadata(self, filepath: str, node):
        with open(filepath, "wb") as f:
            f.write(self.serializer(self.character_class.__bytes__(node)))
        self.log.info("Wrote new node metadata to filesystem {}".format(filepath))
        return filepath

    #
    # API
    #
    def all(self, federated_only: bool, certificates_only: bool = False) -> Set[Union[Any, Certificate]]:
        filenames = os.listdir(self.certificates_dir if certificates_only else self.metadata_dir)
        self.log.info("Found {} known node metadata files at {}".format(len(filenames), self.metadata_dir))

        known_certificates = set()
        if certificates_only:
            for filename in filenames:
                certificate = self.__read_tls_public_certificate(os.path.join(self.certificates_dir, filename))
                known_certificates.add(certificate)
            return known_certificates

        else:
            known_nodes = set()
            for filename in filenames:
                metadata_path = os.path.join(self.metadata_dir, filename)
                node = self.__read_metadata(filepath=metadata_path, federated_only=federated_only)  # TODO: 466
                known_nodes.add(node)
            return known_nodes

    @validate_checksum_address
    def get(self, checksum_address: str, federated_only: bool, certificate_only: bool = False):
        if certificate_only is True:
            certificate = self.__read_tls_public_certificate(checksum_address=checksum_address)
            return certificate
        metadata_path = self.__generate_metadata_filepath(checksum_address=checksum_address)
        node = self.__read_metadata(filepath=metadata_path, federated_only=federated_only)  # TODO: 466
        return node

    def store_node_certificate(self, certificate: Certificate):
        certificate_filepath = self._write_tls_certificate(certificate=certificate)
        return certificate_filepath

    def store_node_metadata(self, node, filepath: str = None) -> str:
        address = node.checksum_public_address
        filepath = self.__generate_metadata_filepath(checksum_address=address, metadata_dir=filepath)
        self.__write_metadata(filepath=filepath, node=node)
        return filepath

    def save_node(self, node, force) -> Tuple[str, str]:
        certificate_filepath = self.store_node_certificate(certificate=node.certificate)
        metadata_filepath = self.store_node_metadata(node=node)
        return metadata_filepath, certificate_filepath

    @validate_checksum_address
    def remove(self, checksum_address: str, metadata: bool = True, certificate: bool = True) -> None:

        if metadata is True:
            metadata_filepath = self.__generate_metadata_filepath(checksum_address=checksum_address)
            os.remove(metadata_filepath)
            self.log.debug("Deleted {} from the filesystem".format(checksum_address))

        if certificate is True:
            certificate_filepath = self.generate_certificate_filepath(checksum_address=checksum_address)
            os.remove(certificate_filepath)
            self.log.debug("Deleted {} from the filesystem".format(checksum_address))

        return

    def clear(self, metadata: bool = True, certificates: bool = True) -> None:
        """Forget all stored nodes and certificates"""

        def __destroy_dir_contents(path):
            for file in os.listdir(path):
                file_path = os.path.join(path, file)
                if os.path.isfile(file_path):
                    os.unlink(file_path)

        if metadata is True:
            __destroy_dir_contents(self.metadata_dir)
        if certificates is True:
            __destroy_dir_contents(self.certificates_dir)

        return

    def payload(self) -> dict:
        payload = {
            'storage_type': self._name,
            'storage_root': self.root_dir,
            'metadata_dir': self.metadata_dir,
            'certificates_dir': self.certificates_dir
        }
        return payload

    @classmethod
    def from_payload(cls, payload: dict, *args, **kwargs) -> 'LocalFileBasedNodeStorage':
        storage_type = payload[cls._TYPE_LABEL]
        if not storage_type == cls._name:
            raise cls.NodeStorageError("Wrong storage type. got {}".format(storage_type))
        del payload['storage_type']

        return cls(*args, **payload, **kwargs)

    def initialize(self) -> bool:
        try:
            os.mkdir(self.root_dir, mode=0o755)
            os.mkdir(self.metadata_dir, mode=0o755)
            os.mkdir(self.certificates_dir, mode=0o755)
        except FileExistsError:
            message = "There are pre-existing files at {}".format(self.root_dir)
            raise self.NodeStorageError(message)
        except FileNotFoundError:
            raise self.NodeStorageError("There is no existing configuration at {}".format(self.root_dir))

        return bool(all(map(os.path.isdir, (self.root_dir, self.metadata_dir, self.certificates_dir))))
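The storage class above names its Logger after the class itself, so every event carries the class name as its namespace. A minimal, self-contained sketch of that pattern (the Storage class and path below are illustrative, not part of nucypher):

from io import StringIO
from twisted.logger import Logger, textFileLogObserver

class Storage(object):
    def __init__(self, out):
        # Same pattern as above: the class name becomes the log namespace.
        self.log = Logger(self.__class__.__name__,
                          observer=textFileLogObserver(out))

    def save(self, path):
        self.log.info("Wrote new node metadata to filesystem {path}", path=path)

out = StringIO()
Storage(out).save("/tmp/known_nodes/metadata/demo.node")
print(out.getvalue())  # '... [Storage#info] Wrote new node metadata to ...'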
Exemple #11
0
class FSProtocol(basic.LineReceiver):
    class State(Enum):
        IDLE = 0
        POLLING = 1
        POLLED = 2
        RAW = 3

    def __init__(self):
        self.log = Logger()
        self.setIdleState()

    def setIdleState(self):
        self.my_state = self.State.IDLE
        self.wait_for_list = None
        self.file_transfer_peer = None
        self.setLineMode()

    def connectionMade(self):
        self.log.info("New connection established: {peer!r}, {pid!s}",
                      peer=self.transport.getPeer(),
                      pid=id(self))
        self.factory.protocols.append(self)
        self.transport.write(b'Hello from server\r\n')

    def send(self, command, argument):
        if command != "":
            command += " "
        self.transport.write(command.upper().encode() + argument.encode() +
                             b'\r\n')
        #  self.log.info("Sending to {peer!r}: {command!s} {argument!s}",
        #          command=command, argument=argument)

    def doReject(self, line):
        self.log.error("We had to say no to {peer!r}: {line}",
                       peer=self.transport.getPeer(),
                       line=line)
        self.transport.write(b'REJECT ' + line.encode() + b'\r\n')

    #  def doRequest(self, regex):
    #      self.log.debug("server asks for files: {regex!s}", regex=regex)
    #      self.transport.write(b'REQUEST ' + regex.encode() + b'\r\n')

    def requestFiles(self, regex):
        '''
        Called by the factory when one of the users requests a search
        '''
        if self.my_state != self.State.IDLE or self.wait_for_list is not None:
            return defer.fail(RuntimeError("Connection is busy"))

        # Go to POLLED state, so no one will interrupt us
        self.my_state = self.State.POLLED

        self.send("REQUEST", regex)

        def debuglogc(data):
            self.log.debug("wait_for_list:requestFiles:callback: {data!r}",
                           data=data)
            return data

        def debugloge(err):
            self.log.debug("wait_for_list:requestFiles:errback: {err!r}",
                           err=err)
            return err

        self.wait_for_list = defer.Deferred().addCallbacks(
            debuglogc, debugloge)
        # We create Deferred, that will be fired as the response comes from user
        return self.wait_for_list

    def trySetStateIdle(self):
        # Some operation has finished
        # Check if there is file request
        #  if len(self.outer_req) > 0:
        #      # Set state
        #      self.state = self.State.POLLED
        #      # Ask peer
        #      regex,d = self.outer_req[0]
        #      self.transport.write("TODO some message" + regex + b'\r\n')
        pass

    def handleFind(self, regex):
        '''
        Handles the FIND request from the user
        '''
        self.my_state = self.State.POLLING

        # The factory will ask all the others about our regexp
        # The result here is the string
        d = self.factory.getFiles(self, regex)

        def response(ans):
            self.log.info("FIND command returns next {filelist!s}",
                          filelist=ans)
            self.send('RESPONSE', ans)

        def error(e):
            self.doReject(e.getErrorMessage())

        def idle(ignore):
            self.my_state = self.State.IDLE

        d.addCallbacks(response, error)
        d.addBoth(idle)

    def lineReceived(self, line):
        self.log.info("lineReceived: {peer!r} got message: {line!s}",
                      peer=self.transport.getPeer(),
                      line=line)

        line = line.decode()
        data = cleanInput(line)

        if len(data) == 0 or data == '':
            self.log.info("Oh, never mind, it's gone")
            self.doReject("Seems like a bunch of tabs")
            return

        command = data[0].upper()

        argument = data[1] if len(data) > 1 else None

        # Analyze the command
        if command == 'FIND':  # Find files from other users
            if self.my_state != self.State.IDLE:
                self.doReject("Another operation is in run")
                return
            if argument is None:
                self.doReject("Regex expected")
                return
            self.handleFind(argument)

        elif command == 'FILES':  # Polled user returns list of files here
            if self.my_state != self.State.POLLED:
                self.doReject(
                    "Got list of files, but there is no request for them")
                return
            #  self.log.debug("Got list of files from {peer!r}", peer=self.transport.getPeer())
            if argument is None:
                self.wait_for_list.errback(
                    RuntimeError("This peer has no such files"))
            else:
                # Return self id and list of files
                self.wait_for_list.callback(':'.join([str(id(self)),
                                                      argument]))
            # Cleanup
            self.wait_for_list = None
            self.my_state = self.State.IDLE

        elif command == 'GET':  # User asks for file
            '''
            This is the routine for the connection which needs the file
            '''
            # Check if we ready
            if self.my_state != self.State.IDLE:
                self.doReject("Can not give you file at the moment")
                return
            # Get arguments
            if argument is None:
                self.doReject("Expected filename for GET operation")
                return
            try:
                pid, filename = argument.split(':', 1)
            except ValueError:  # argument did not contain a ':' separator
                self.doReject("not a valid filename")
                return

            # Find the peer
            peer = self.factory.findConnById(pid)

            # Check if we are ready
            if peer is None:
                self.doReject("Sorry, the peer seems afk")
                return
            if peer.my_state != peer.State.IDLE:
                self.doReject("Sorry, the peer seems busy")
                return

            # Ready to begin, don't interrupt us please
            peer.my_state = peer.State.RAW
            self.my_state = self.State.RAW

            peer.file_transfer_peer = self
            self.file_transfer_peer = peer

            peer.send("OBTAIN", filename)
            self.send("RAW", filename)

            self.setRawMode()
            peer.setRawMode()

        elif command == 'ALICE':  # User remembers how this project started
            self.send('', "nice girl")

        else:
            self.doReject("not a protocol command: %s" % command)

    def rawDataReceived(self, data):
        '''
        Called when a user sends raw file data to be relayed to the peer
        '''
        if self.file_transfer_peer is None:
            self.log.warning("RECIEVING RAW DATA WITHOUT PEER TO RESEND")
            return
        peer = self.file_transfer_peer
        self.log.info("RAW: resending data: %d KB" % len(data))
        peer.transport.write(data)
        if not data.endswith(b'\r\n'):
            # Transmission not done yet
            return
        self.log.info("RAW: Transmission complete")
        peer.setIdleState()
        self.setIdleState()

    def connectionLost(self, reason):
        self.log.info("Connection closed: {peer!r}, {reason!r}",
                      peer=self.transport.getPeer(),
                      reason=reason)
        self.factory.protocols.remove(self)
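requestFiles above parks a Deferred on the connection and fires it from lineReceived once the FILES reply arrives. A stripped-down sketch of that pattern with the protocol machinery removed (PendingRequest is illustrative):

from twisted.internet import defer

class PendingRequest(object):
    def __init__(self):
        self.wait_for_list = None

    def request(self):
        # Refuse overlapping requests, as requestFiles() does above.
        if self.wait_for_list is not None:
            return defer.fail(RuntimeError("Connection is busy"))
        self.wait_for_list = defer.Deferred()
        return self.wait_for_list

    def reply_received(self, payload):
        # Fired from the line handler when the response comes in.
        d, self.wait_for_list = self.wait_for_list, None
        d.callback(payload)

p = PendingRequest()
p.request().addCallback(print)
p.reply_received("42:file-a.txt,file-b.txt")  # prints the payload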
Exemple #12
0
class SolidityCompiler:

    __default_contract_version = 'v0.0.0'
    __default_contract_dir = os.path.join(dirname(abspath(__file__)), 'source')

    __compiled_contracts_dir = 'contracts'
    __zeppelin_library_dir = 'zeppelin'
    __aragon_library_dir = 'aragon'

    optimization_runs = 200

    class CompilerError(Exception):
        pass

    class VersionError(Exception):
        pass

    @classmethod
    def default_contract_dir(cls):
        return cls.__default_contract_dir

    def __init__(self,
                 source_dirs: List[SourceDirs] = None,
                 ignore_solidity_check: bool = False) -> None:

        # Allow for optional installation
        from solcx.install import get_executable

        self.log = Logger('solidity-compiler')

        version = SOLIDITY_COMPILER_VERSION if not ignore_solidity_check else None
        self.__sol_binary_path = get_executable(version=version)

        if source_dirs is None or len(source_dirs) == 0:
            self.source_dirs = [
                SourceDirs(root_source_dir=self.__default_contract_dir)
            ]
        else:
            self.source_dirs = source_dirs

    def compile(self) -> dict:
        interfaces = dict()
        for root_source_dir, other_source_dirs in self.source_dirs:
            if root_source_dir is None:
                self.log.warn("One of the root directories is None")
                continue

            raw_interfaces = self._compile(root_source_dir, other_source_dirs)
            for name, data in raw_interfaces.items():
                # Extract contract version from docs
                version_search = re.search(
                    r"""
                
                \"details\":  # @dev tag in contract docs
                \".*?         # Skip any data in the beginning of details
                \|            # Beginning of version definition |
                (v            # Capture version starting from symbol v
                \d+           # At least one digit of major version
                \.            # Digits splitter
                \d+           # At least one digit of minor version
                \.            # Digits splitter
                \d+           # At least one digit of patch
                )             # End of capturing
                \|            # End of version definition |
                .*?\"         # Skip any data in the end of details
                
                """, data['devdoc'], re.VERBOSE)
                version = version_search.group(
                    1) if version_search else self.__default_contract_version
                try:
                    existence_data = interfaces[name]
                except KeyError:
                    existence_data = dict()
                    interfaces.update({name: existence_data})
                if version not in existence_data:
                    existence_data.update({version: data})
        return interfaces

    def _compile(self, root_source_dir: str, other_source_dirs: [str]) -> dict:
        """Executes the compiler with parameters specified in the json config"""

        # Allow for optional installation
        from solcx import compile_files
        from solcx.exceptions import SolcError

        self.log.info("Using solidity compiler binary at {}".format(
            self.__sol_binary_path))
        contracts_dir = os.path.join(root_source_dir,
                                     self.__compiled_contracts_dir)
        self.log.info(
            "Compiling solidity source files at {}".format(contracts_dir))

        source_paths = set()
        source_walker = os.walk(top=contracts_dir, topdown=True)
        if other_source_dirs is not None:
            for source_dir in other_source_dirs:
                other_source_walker = os.walk(top=source_dir, topdown=True)
                source_walker = itertools.chain(source_walker,
                                                other_source_walker)

        for root, dirs, files in source_walker:
            for filename in files:
                if filename.endswith('.sol'):
                    path = os.path.join(root, filename)
                    source_paths.add(path)
                    self.log.debug(
                        "Collecting solidity source {}".format(path))

        # Compile with remappings: https://github.com/ethereum/py-solc
        zeppelin_dir = os.path.join(root_source_dir,
                                    self.__zeppelin_library_dir)
        aragon_dir = os.path.join(root_source_dir, self.__aragon_library_dir)

        remappings = (
            "contracts={}".format(contracts_dir),
            "zeppelin={}".format(zeppelin_dir),
            "aragon={}".format(aragon_dir),
        )

        self.log.info("Compiling with import remappings {}".format(
            ", ".join(remappings)))

        optimization_runs = self.optimization_runs

        try:
            compiled_sol = compile_files(source_files=source_paths,
                                         solc_binary=self.__sol_binary_path,
                                         import_remappings=remappings,
                                         allow_paths=root_source_dir,
                                         optimize=True,
                                         optimize_runs=optimization_runs)

            self.log.info(
                "Successfully compiled {} contracts with {} optimization runs".
                format(len(compiled_sol), optimization_runs))

        except FileNotFoundError:
            raise RuntimeError(
                "The solidity compiler is not at the specified path. "
                "Check that the file exists and is executable.")
        except PermissionError:
            raise RuntimeError(
                "The solidity compiler binary at {} is not executable. "
                "Check the file's permissions.".format(self.__sol_binary_path))

        except SolcError:
            raise

        # Cleanup the compiled data keys
        interfaces = {
            name.split(':')[-1]: compiled_sol[name]
            for name in compiled_sol
        }
        return interfaces
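The version-extraction regex in compile() can be exercised in isolation. A quick check against an assumed sample devdoc string (the compact pattern below is an equivalent of the verbose one above):

import re

# Compact equivalent (assumed) of the verbose pattern above.
VERSION_RE = re.compile(r'\"details\":\".*?\|(v\d+\.\d+\.\d+)\|.*?\"')

sample_devdoc = '{"details":"Some docs |v1.2.3| more docs"}'
match = VERSION_RE.search(sample_devdoc)
print(match.group(1) if match else 'v0.0.0')  # default when no tag is found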
Exemple #13
0
# Logging services
#
@implementer(ILogObserver)
class MyLogObserver(object):
    def __call__(self, event):
        text = event['log_format'].format(**event)
        sys.stdout.write(text + '\n')
        sys.stdout.flush()

#globalLogBeginner.beginLoggingTo([textFileLogObserver(sys.stdout)])
#globalLogBeginner.beginLoggingTo([LegacySyslogObserver('telldus-logger')])
#globalLogBeginner.beginLoggingTo([MyLogObserver()])
globalLogPublisher.addObserver(MyLogObserver())

log = Logger()
log.info("Starting logging")



class GraphiteProtocol(Protocol):
    noisy = False

    def __init__(self, name, readings):
        self.msg = self.compile_msg(name, readings)
        log.info("Sending {n}: {v}", n=name, v=readings)


    def compile_msg(self, name, readings):

        # Some sensors have a tendency to glitch. So if the temp difference
        # from the last reading is >20 degrees, then this reading is a glitch
Exemple #14
0
import sys

from twisted.internet import reactor, endpoints
from twisted.logger import Logger
from twisted.python import log
from twisted.web import server

from chat import NetCatChatFactory
from api import Root

# Configure logging to standard out.
logger = Logger()
log.startLogging(sys.stdout)

# Create an instance of the factories.
factory = NetCatChatFactory()
site = server.Site(Root(factory))

# Listen on TCP port 1400 for chat and port 8080 for the API.
endpoints.serverFromString(reactor, "tcp:1400").listen(factory)
endpoints.serverFromString(reactor, "tcp:8080").listen(site)

# Start listening for connections (and run the event-loop).
logger.info("Listening for netcat on port 1400")
logger.info("Listening for HTTP on port 8080")
reactor.run()

# Note that any code after this point will *not* be executed. reactor.run enters
# an infinite loop until shutdown.
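For reference, a modern twisted.logger equivalent (a sketch) of the legacy log.startLogging(sys.stdout) call above:

import sys
from twisted.logger import globalLogBeginner, textFileLogObserver

globalLogBeginner.beginLoggingTo([textFileLogObserver(sys.stdout)])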
Exemple #15
0
class Felix(Character, NucypherTokenActor):
    """
    A NuCypher ERC20 faucet / Airdrop scheduler.

    Felix is a web application that gives NuCypher *testnet* tokens to registered addresses
    with a scheduled reduction of disbursement amounts, and an HTTP endpoint
    for handling new address registration.

    The main goal of Felix is to provide a source of testnet tokens for
    research and the development of production-ready nucypher dApps.
    """

    _default_crypto_powerups = [SigningPower, BlockchainPower]

    TEMPLATE_NAME = 'felix.html'

    # Intervals
    DISTRIBUTION_INTERVAL = 60 * 60  # seconds (60*60=1Hr)
    DISBURSEMENT_INTERVAL = 24  # (24) hours
    STAGING_DELAY = 10  # seconds

    # Disbursement
    BATCH_SIZE = 10  # transactions
    MULTIPLIER = 0.95  # 5% reduction of previous stake is 0.95, for example
    MINIMUM_DISBURSEMENT = 1e18  # NuNits
    ETHER_AIRDROP_AMOUNT = int(2e18)  # Wei

    # Node Discovery
    LEARNING_TIMEOUT = 30  # seconds
    _SHORT_LEARNING_DELAY = 60  # seconds
    _LONG_LEARNING_DELAY = 120  # seconds
    _ROUNDS_WITHOUT_NODES_AFTER_WHICH_TO_SLOW_DOWN = 1

    # Twisted
    _CLOCK = reactor
    _AIRDROP_QUEUE = dict()

    class NoDatabase(RuntimeError):
        pass

    def __init__(self,
                 db_filepath: str,
                 rest_host: str,
                 rest_port: int,
                 crash_on_error: bool = False,
                 economics: TokenEconomics = None,
                 distribute_ether: bool = True,
                 *args,
                 **kwargs):

        # Character
        super().__init__(*args, **kwargs)
        self.log = Logger(f"felix-{self.checksum_address[-6::]}")

        # Network
        self.rest_port = rest_port
        self.rest_host = rest_host
        self.rest_app = NOT_RUNNING
        self.crash_on_error = crash_on_error

        # Database
        self.db_filepath = db_filepath
        self.db = NO_DATABASE_AVAILABLE
        self.db_engine = create_engine(f'sqlite:///{self.db_filepath}',
                                       convert_unicode=True)

        # Blockchain
        self.token_agent = NucypherTokenAgent(blockchain=self.blockchain)
        self.reserved_addresses = [
            self.checksum_address, Blockchain.NULL_ADDRESS
        ]

        # Update reserved addresses with deployed contracts
        existing_entries = list(
            self.blockchain.interface.registry.enrolled_addresses)
        self.reserved_addresses.extend(existing_entries)

        # Distribution
        self.__distributed = 0  # Track NU Output
        self.__airdrop = 0  # Track Batch
        self.__disbursement = 0  # Track Quantity
        self._distribution_task = LoopingCall(f=self.airdrop_tokens)
        self._distribution_task.clock = self._CLOCK
        self.start_time = NOT_RUNNING

        if not economics:
            economics = TokenEconomics()
        self.economics = economics

        self.MAXIMUM_DISBURSEMENT = economics.maximum_allowed_locked
        self.INITIAL_DISBURSEMENT = economics.minimum_allowed_locked

        # Optionally send ether with each token transaction
        self.distribute_ether = distribute_ether

        # Banner
        self.log.info(FELIX_BANNER.format(self.checksum_address))

    def __repr__(self):
        class_name = self.__class__.__name__
        r = f'{class_name}(checksum_address={self.checksum_address}, db_filepath={self.db_filepath})'
        return r

    def make_web_app(self):
        from flask import request
        from flask_sqlalchemy import SQLAlchemy

        # WSGI/Flask Service
        short_name = bytes(self.stamp).hex()[:6]
        self.rest_app = Flask(f"faucet-{short_name}",
                              template_folder=TEMPLATES_DIR)
        self.rest_app.config[
            'SQLALCHEMY_DATABASE_URI'] = f'sqlite:///{self.db_filepath}'
        try:
            self.rest_app.secret_key = sha256(
                os.environ['NUCYPHER_FELIX_DB_SECRET'].encode())  # uses envvar
        except KeyError:
            raise OSError(
                "The 'NUCYPHER_FELIX_DB_SECRET' is not set.  Export your application secret and try again."
            )

        # Database
        self.db = SQLAlchemy(self.rest_app)

        # Database Tables
        class Recipient(self.db.Model):
            """
            The one and only table in Felix's database; Used to track recipients and airdrop metadata.
            """

            __tablename__ = 'recipient'

            id = self.db.Column(self.db.Integer, primary_key=True)
            address = self.db.Column(self.db.String,
                                     unique=True,
                                     nullable=False)
            joined = self.db.Column(self.db.DateTime,
                                    nullable=False,
                                    default=datetime.utcnow)
            total_received = self.db.Column(self.db.String,
                                            default='0',
                                            nullable=False)
            last_disbursement_amount = self.db.Column(self.db.String,
                                                      nullable=False,
                                                      default=0)
            last_disbursement_time = self.db.Column(self.db.DateTime,
                                                    nullable=True,
                                                    default=None)
            is_staking = self.db.Column(self.db.Boolean,
                                        nullable=False,
                                        default=False)

            def __repr__(self):
                return f'{self.__class__.__name__}(id={self.id})'

        self.Recipient = Recipient  # Bind to outer class

        # Flask decorators
        rest_app = self.rest_app
        limiter = Limiter(self.rest_app,
                          key_func=get_remote_address,
                          headers_enabled=True)

        #
        # REST Routes
        #

        @rest_app.route("/", methods=['GET'])
        @limiter.limit("100/day;20/hour;1/minute")
        def home():
            rendering = render_template(self.TEMPLATE_NAME)
            return rendering

        @rest_app.route("/register", methods=['POST'])
        @limiter.limit("5 per day")
        def register():
            """Handle new recipient registration via POST request."""
            try:
                new_address = request.form['address']
            except KeyError:
                return Response(status=400)  # TODO

            if not eth_utils.is_checksum_address(new_address):
                return Response(status=400)  # TODO

            if new_address in self.reserved_addresses:
                return Response(status=400)  # TODO

            try:
                with ThreadedSession(self.db_engine) as session:

                    existing = Recipient.query.filter_by(
                        address=new_address).all()
                    if existing:
                        # Address already exists; Abort
                        self.log.debug(f"{new_address} is already enrolled.")
                        return Response(status=400)

                    # Create the record
                    recipient = Recipient(address=new_address,
                                          joined=datetime.now())
                    session.add(recipient)
                    session.commit()

            except Exception as e:
                # Pass along exceptions to the logger
                self.log.critical(str(e))
                raise

            else:
                return Response(status=200)  # TODO

        return rest_app

    def create_tables(self) -> None:
        self.make_web_app()
        return self.db.create_all(app=self.rest_app)

    def start(self,
              host: str,
              port: int,
              web_services: bool = True,
              distribution: bool = True,
              crash_on_error: bool = False):

        self.crash_on_error = crash_on_error

        if self.start_time is not NOT_RUNNING:
            raise RuntimeError("Felix is already running.")

        self.start_time = maya.now()
        payload = {"wsgi": self.rest_app, "http_port": port}
        deployer = HendrixDeploy(action="start", options=payload)
        click.secho(f"Running {self.__class__.__name__} on {host}:{port}")

        if distribution is True:
            self.start_distribution()

        if web_services is True:
            deployer.run()  # <-- Blocking call (Reactor)

    def start_distribution(self, now: bool = True) -> bool:
        """Start token distribution"""
        self.log.info(NU_BANNER)
        self.log.info("Starting NU Token Distribution | START")
        if self.token_balance == NU.ZERO():
            raise self.ActorError(
                f"Felix address {self.checksum_address} has 0 NU tokens.")
        self._distribution_task.start(interval=self.DISTRIBUTION_INTERVAL,
                                      now=now)
        return True

    def stop_distribution(self) -> bool:
        """Start token distribution"""
        self.log.info("Stopping NU Token Distribution | STOP")
        self._distribution_task.stop()
        return True

    def __calculate_disbursement(self, recipient) -> int:
        """Calculate the next reward for a recipient once the are selected for distribution"""

        # Initial Reward - sets the future rates
        if recipient.last_disbursement_time is None:
            amount = self.INITIAL_DISBURSEMENT

        # Cap reached, We'll continue to leak the minimum disbursement
        elif int(recipient.total_received) >= self.MAXIMUM_DISBURSEMENT:
            amount = self.MINIMUM_DISBURSEMENT

        # Calculate the next disbursement
        else:
            amount = math.ceil(
                int(recipient.last_disbursement_amount) * self.MULTIPLIER)
            if amount < self.MINIMUM_DISBURSEMENT:
                amount = self.MINIMUM_DISBURSEMENT

        return int(amount)

    def __transfer(self, disbursement: int, recipient_address: str) -> str:
        """Perform a single token transfer transaction from one account to another."""

        self.__disbursement += 1
        txhash = self.token_agent.transfer(
            amount=disbursement,
            target_address=recipient_address,
            sender_address=self.checksum_address)

        if self.distribute_ether:
            ether = self.ETHER_AIRDROP_AMOUNT
            transaction = {
                'to': recipient_address,
                'from': self.checksum_address,
                'value': ether,
                'gasPrice': self.blockchain.interface.w3.eth.gasPrice
            }
            ether_txhash = self.blockchain.interface.w3.eth.sendTransaction(
                transaction)

            self.log.info(
                f"Disbursement #{self.__disbursement} OK | NU {txhash.hex()[-6:]} | ETH {ether_txhash.hex()[:6]} "
                f"({str(NU(disbursement, 'NuNit'))} + {self.ETHER_AIRDROP_AMOUNT} wei) -> {recipient_address}"
            )

        else:
            self.log.info(
                f"Disbursement #{self.__disbursement} OK | {txhash.hex()[-6:]} |"
                f"({str(NU(disbursement, 'NuNit'))} -> {recipient_address}")

        return txhash

    def airdrop_tokens(self):
        """
        Calculate airdrop eligibility via faucet registration
        and transfer tokens to selected recipients.
        """

        with ThreadedSession(self.db_engine) as session:
            population = session.query(self.Recipient).count()

        message = f"{population} registered faucet recipients; " \
                  f"Distributed {str(NU(self.__distributed, 'NuNit'))} since {self.start_time.slang_time()}."
        self.log.debug(message)
        if population == 0:
            return  # Abort - no recipients are registered.

        # For filtration
        since = datetime.now() - timedelta(hours=self.DISBURSEMENT_INTERVAL)

        datetime_filter = or_(self.Recipient.last_disbursement_time <= since,
                              self.Recipient.last_disbursement_time ==
                              None)  # This must be `==` not `is`

        with ThreadedSession(self.db_engine) as session:
            candidates = session.query(
                self.Recipient).filter(datetime_filter).all()
            if not candidates:
                self.log.info("No eligible recipients this round.")
                return

        # Discard invalid addresses, in-depth
        invalid_addresses = list()

        def siphon_invalid_entries(candidate):
            address_is_valid = eth_utils.is_checksum_address(candidate.address)
            if not address_is_valid:
                invalid_addresses.append(candidate.address)
            return address_is_valid

        candidates = list(filter(siphon_invalid_entries, candidates))

        if invalid_addresses:
            self.log.info(
                f"{len(invalid_addresses)} invalid entries detected. Pruning database."
            )

            # TODO: Is this needed? - Invalid entries are rejected at the endpoint view.
            # Prune database of invalid records
            # with ThreadedSession(self.db_engine) as session:
            #     bad_eggs = session.query(self.Recipient).filter(self.Recipient.address in invalid_addresses).all()
            #     for egg in bad_eggs:
            #         session.delete(egg.id)
            #     session.commit()

        if not candidates:
            self.log.info("No eligible recipients this round.")
            return

        d = threads.deferToThread(self.__do_airdrop, candidates=candidates)
        self._AIRDROP_QUEUE[self.__airdrop] = d
        return d

    def __do_airdrop(self, candidates: list):

        self.log.info(f"Staging Airdrop #{self.__airdrop}.")

        # Staging
        staged_disbursements = [(r, self.__calculate_disbursement(recipient=r))
                                for r in candidates]
        batches = list(
            staged_disbursements[index:index + self.BATCH_SIZE]
            for index in range(0, len(staged_disbursements), self.BATCH_SIZE))
        total_batches = len(batches)

        self.log.info("====== Staged Airdrop ======")
        for recipient, disbursement in staged_disbursements:
            self.log.info(f"{recipient.address} ... {str(disbursement)[:-18]}")
        self.log.info("==========================")

        # Staging Delay
        self.log.info(
            f"Airdrop will commence in {self.STAGING_DELAY} seconds...")
        if self.STAGING_DELAY > 3:
            time.sleep(self.STAGING_DELAY - 3)
        for i in range(3):
            time.sleep(1)
            self.log.info(f"NU Token airdrop starting in {3 - i} seconds...")

        # Slowly, in series...
        for batch, staged_disbursement in enumerate(batches, start=1):
            self.log.info(f"======= Batch #{batch} ========")

            for recipient, disbursement in staged_disbursement:

                # Perform the transfer... leaky faucet.
                self.__transfer(disbursement=disbursement,
                                recipient_address=recipient.address)
                self.__distributed += disbursement

                # Update the database record
                recipient.last_disbursement_amount = str(disbursement)
                recipient.total_received = str(
                    int(recipient.total_received) + disbursement)
                recipient.last_disbursement_time = datetime.now()

                self.db.session.add(recipient)
                self.db.session.commit()

            # end inner loop
            self.log.info(
                f"Completed Airdrop #{self.__airdrop} Batch #{batch} of {total_batches}."
            )

        # end outer loop
        now = maya.now()
        next_interval_slang = now.add(
            seconds=self.DISTRIBUTION_INTERVAL).slang_time()
        self.log.info(
            f"Completed Airdrop #{self.__airdrop}; Next airdrop is {next_interval_slang}."
        )

        del self._AIRDROP_QUEUE[self.__airdrop]
        self.__airdrop += 1
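__calculate_disbursement above decays each recipient's payout by MULTIPLIER per round and floors it at MINIMUM_DISBURSEMENT. A worked sketch with small illustrative numbers (real values are in NuNits and come from TokenEconomics):

import math

MULTIPLIER = 0.95            # 5% reduction per round, as above
MINIMUM_DISBURSEMENT = 100   # illustrative stand-in for 1e18 NuNits
initial_disbursement = 1000  # illustrative stand-in for economics.minimum_allowed_locked

amount = initial_disbursement
for round_number in range(1, 6):
    print(round_number, amount)  # 1000, 950, 903, 858, 816, ...
    amount = max(math.ceil(amount * MULTIPLIER), MINIMUM_DISBURSEMENT)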
Exemple #16
0
 def load_config(self, config_file, default_log_level, logObserverFactory):
     """                                                             
     Load the configuration for this provisioner and initialize it.  
     """
     log = Logger(observer=logObserverFactory("ERROR"))
     try:
         # Load config.
         scp = load_config(config_file, defaults=self.get_config_defaults())
         section = "PROVISIONER"
         config = section2dict(scp, section)
         self.config = config
         # Start logger.
         log_level = config.get('log_level', default_log_level)
         log = Logger(observer=logObserverFactory(log_level))
         self.log = log
         log.info("Initializing SSH provisioner.",
                  event_type='init_provisioner')
         # Initialize template environment.
         self.template_env = jinja2.Environment(trim_blocks=True,
                                                lstrip_blocks=True)
         self.template_env.filters['shellquote'] = filter_shellquote
         self.template_env.filters['newline'] = filter_newline
         # Load SSH configuration info.
         try:
             self.provision_cmd = self.template_env.from_string(
                 config["provision_cmd"].strip())
             self.deprovision_cmd = self.template_env.from_string(
                 config["deprovision_cmd"].strip())
             template_str = config.get("sync_cmd", None)
             if template_str is not None:
                 log.debug("Sync command template: {template}",
                           template=template_str)
                 self.sync_cmd = self.template_env.from_string(
                     template_str.strip())
             self.provision_cmd_type = self.parse_command_type(
                 config.get("provision_cmd_type", "simple"))
             self.deprovision_cmd_type = self.parse_command_type(
                 config.get("deprovision_cmd_type", "simple"))
             self.sync_cmd_type = self.parse_command_type(
                 config.get("sync_cmd_type", "simple"))
             if self.provision_cmd_type == self.CMD_TYPE_INPUT:
                 self.provision_input = self.template_env.from_string(
                     config["provision_input"].strip())
             if self.deprovision_cmd_type == self.CMD_TYPE_INPUT:
                 self.deprovision_input = self.template_env.from_string(
                     config["deprovision_input"].strip())
             if self.sync_cmd_type == self.CMD_TYPE_INPUT:
                 template_str = config.get("sync_input", None)
                 log.debug("Sync input template: {template}",
                           template=template_str)
                 self.sync_input = self.template_env.from_string(
                     template_str.strip())
             result = config.get("provision_ok_result", None)
             if result is not None:
                 self.provision_ok_result = int(result.strip())
             result = config.get("deprovision_ok_result", None)
             if result is not None:
                 self.deprovision_ok_result = int(result.strip())
             result = config.get("sync_ok_result", None)
             if result is not None:
                 self.sync_ok_result = int(result.strip())
             self.cmd_timeout = int(config['cmd_timeout'])
             self.host = config["host"]
             self.port = int(config["port"])
             self.ssh_user = config["user"]
             self.known_hosts = os.path.expanduser(config["known_hosts"])
             if "keys" in config:
                 self.keys = config["keys"].split(",")
         except KeyError as ex:
             raise OptionMissingError(
                 "A require option was missing: '{0}:{1}'.".format(
                     section, ex.args[0]))
         self.load_groupmap(config.get("group_map", None))
     except Exception as ex:
         d = self.reactor.callLater(0, self.reactor.stop)
         log.failure("Provisioner failed to initialize: {0}".format(ex))
         raise
     return defer.succeed(None)
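load_config above builds a throwaway Logger at a safe default level first, then replaces it once the configured level is known. A condensed sketch of that two-phase pattern (observer_factory below is illustrative and does no real level filtering):

import sys
from twisted.logger import Logger, FileLogObserver, formatEventAsClassicLogText

def observer_factory(level):
    # Illustrative only: a real factory would filter events by level.
    return FileLogObserver(sys.stdout, formatEventAsClassicLogText)

log = Logger(observer=observer_factory("ERROR"))    # safe default
log_level = "DEBUG"                                 # pretend this came from config
log = Logger(observer=observer_factory(log_level))  # configured logger
log.info("Initializing SSH provisioner.", event_type='init_provisioner')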
Exemple #17
0
class Discord(IRCClient):

	nickname = "discord"
	realname = "Discord"
	username = "******"
	versionName = "Discord"
	versionNum = "0.01"

	magicFile = "true.txt"

	def __init__(self, accessList):
		self.logger = Logger(observer=textFileLogObserver(sys.stdout))

		self.accessList = [nick.lower() for nick in accessList]

		if not os.path.exists(self.magicFile):
			self.logger.info("Creating magic file")

			try:
				with open(self.magicFile, "a"):
					pass

			except Exception as ex:
				self.logger.error("Unable to create magic file! {0}".format(ex.message))
				reactor.stop()

		self.markovGenerator = pymarkov.MarkovChainGenerator(self.magicFile)

		self.channels = []
		self.channelPhrasers = {}

		self.logger.debug("Discord initialized")

		# Maybe add hook/plugin system here?

		self.commands = Commands.Commands(self)		

	def removeChannel(self, channel):
		try:
			self.channels.remove(channel)

			self.channelPhrasers[channel].stop()
			
			del self.channelPhrasers[channel]

		except:
			self.logger.error("Error removing {channel} from collection", channel=channel)

	def insertPhrase(self, phrase):
		try:
			with open(self.magicFile, "a") as magicFile:
				magicFile.write("{0}\n".format(phrase))

			try:
				file, ext = os.path.splitext(self.magicFile)
				os.remove("{0}-pickled{1}".format(file, ext))

				# Simply re-populating the dictionary isn't enough for some reason
				self.markovGenerator = pymarkov.MarkovChainGenerator(self.magicFile, 2)

			except IOError as ex:
				self.logger.error("Unable to delete pickled file. {0}".format(ex.message))			

		except Exception as ex:
			self.logger.error("Unable to insert phrase into magic file! {0}".format(ex.message))

	def kickedFrom(self, channel, kicker, message):
		self.removeChannel(channel)

		self.logger.info("Kicked from {channel} by {kicker}", channel=channel, kicker=kicker)

	def left(self, channel):
		self.removeChannel(channel)

		self.logger.info("Left {channel}", channel=channel)

	def handleMessage(self, user, channel, message):
		senderNickname = user.split("!")[0]

		if message.startswith("~reload") and senderNickname in self.accessList:
			self.logger.info("Reloading commands module")
			self.say(channel, "Reloading.")

			try:
				commandsModule = reload(Commands)
				self.commands = commandsModule.Commands(self)

			except Exception as ex:
				self.say(channel, "Failed to load commands module - {0}".format(ex.message))

		elif message.startswith("~"):
			# Don't log commands to the brain
			commandMessage = message[1:]

			self.commands.handleCommand(user, channel, commandMessage)

		else:
			self.logger.info("Adding {message!r} to brain", message=message)

			# Avoid storing anything with the bot's name in it
			brainMessage = message.replace(self.nickname, "")

			self.insertPhrase(brainMessage)

			try:
				randomPhrase = self.generateSentence()

				if self.nickname in message and channel.startswith("#") and self.channelPhrasers[channel].running:
					phrase = "{0}, {1}".format(senderNickname, randomPhrase)

					self.say(channel, phrase)

				elif channel == self.nickname:
					self.logger.debug("Sending message to {nickname}", nickname=senderNickname)

					self.msg(senderNickname, randomPhrase)

				else:
					pass

			except IndexError as generationError:
				self.logger.error(generationError.message)

	def privmsg(self, user, channel, message):
		self.logger.info("Received message from {user} in {channel}", user=user, channel=channel)

		# deferToThread(self.handleMessage, user, channel, message)
		self.handleMessage(user, channel, message)

	def signedOn(self):
		self.logger.info("Signed on")

		self.join("#bots")

	def joined(self, channel):
		self.channels.append(channel)

		self.logger.info("Joined channel {channel!r}", channel=channel)

		channelPhraser = LoopingCall(self.sayRandomPhrase, channel)
		reactor.callLater(2, channelPhraser.start, 600)

		self.channelPhrasers[channel] = channelPhraser

	def generateSentence(self):
		try:
			sentence = self.markovGenerator.generate_sentence()

			sentence = sentence.strip("<{0}>".format(self.nickname))
			sentence = sentence.strip(self.nickname)

			return sentence

		except (IndexError, ValueError) as ex:
			self.logger.error(ex.message)

	def sayRandomPhrase(self, channel):
		sentence = self.generateSentence()
		self.say(channel, sentence)
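joined and removeChannel above keep one LoopingCall per channel and stop it on departure. A minimal sketch of that bookkeeping outside the IRC machinery (intervals copied from the example):

from twisted.internet import reactor
from twisted.internet.task import LoopingCall

channel_phrasers = {}

def say_random_phrase(channel):
    print("speaking in", channel)

def joined(channel):
    phraser = LoopingCall(say_random_phrase, channel)
    # First phrase 2 seconds after joining, then every 600 seconds.
    reactor.callLater(2, phraser.start, 600)
    channel_phrasers[channel] = phraser

def remove_channel(channel):
    phraser = channel_phrasers.pop(channel)
    if phraser.running:
        phraser.stop()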
Exemple #18
0
class BaseContractRegistry(ABC):
    """
    Records known contracts on the disk for future access and utility. This
    lazily writes to the filesystem during contract enrollment.

    WARNING: Unless you are developing NuCypher, you most likely won't ever need
    to use this.
    """

    logger = Logger('ContractRegistry')

    _multi_contract = True
    _contract_name = NotImplemented

    # Registry
    REGISTRY_NAME = 'contract_registry.json'  # TODO: #1511 Save registry with ID-time-based filename
    DEVELOPMENT_REGISTRY_NAME = 'dev_contract_registry.json'

    class RegistryError(Exception):
        pass

    class EmptyRegistry(RegistryError):
        pass

    class NoRegistry(RegistryError):
        pass

    class UnknownContract(RegistryError):
        pass

    class InvalidRegistry(RegistryError):
        """Raised when invalid data is encountered in the registry"""

    class CantOverwriteRegistry(RegistryError):
        pass

    def __init__(self, source=NO_REGISTRY_SOURCE, *args, **kwargs):
        self.__source = source
        self.log = Logger("registry")

    def __eq__(self, other) -> bool:
        if self is other:
            return True  # and that's all
        return bool(self.id == other.id)

    def __repr__(self) -> str:
        r = f"{self.__class__.__name__}(id={self.id[:6]})"
        return r

    @property
    def id(self) -> str:
        """Returns a hexstr of the registry contents."""
        blake = hashlib.blake2b()
        blake.update(self.__class__.__name__.encode())
        blake.update(json.dumps(self.read()).encode())
        digest = blake.digest().hex()
        return digest

    @abstractmethod
    def _destroy(self) -> None:
        raise NotImplementedError

    @abstractmethod
    def write(self, registry_data: list) -> None:
        raise NotImplementedError

    @abstractmethod
    def read(self) -> Union[list, dict]:
        raise NotImplementedError

    @classmethod
    def from_latest_publication(cls,
                                *args,
                                source_manager=None,
                                network: str = NetworksInventory.DEFAULT,
                                **kwargs) -> 'BaseContractRegistry':
        """
        Get the latest contract registry available from a registry source chain.
        """
        if not source_manager:
            source_manager = RegistrySourceManager()

        registry_data, source = source_manager.fetch_latest_publication(
            registry_class=cls, network=network)

        registry_instance = cls(*args, source=source, **kwargs)
        registry_instance.write(registry_data=json.loads(registry_data))
        return registry_instance

    @property
    def source(self) -> 'CanonicalRegistrySource':
        return self.__source

    @property
    def enrolled_names(self) -> Iterator:
        entries = iter(record[0] for record in self.read())
        return entries

    @property
    def enrolled_addresses(self) -> Iterator:
        entries = iter(record[1] for record in self.read())
        return entries

    def enroll(self, contract_name, contract_address, contract_abi,
               contract_version) -> None:
        """
        Enrolls a contract to the chain registry by writing the name, address,
        and abi information to the filesystem as JSON.

        Note: Unless you are developing NuCypher, you most likely won't ever
        need to use this.
        """
        contract_data = [
            contract_name, contract_version, contract_address, contract_abi
        ]
        try:
            registry_data = self.read()
        except self.RegistryError:
            self.log.info(
                "Blank registry encountered: enrolling {}:{}:{}".format(
                    contract_name, contract_version, contract_address))
            registry_data = list()  # empty registry

        registry_data.append(contract_data)
        self.write(registry_data)
        self.log.info("Enrolled {}:{}:{} into registry.".format(
            contract_name, contract_version, contract_address))

    def search(self,
               contract_name: str = None,
               contract_version: str = None,
               contract_address: str = None) -> tuple:
        """
        Searches the registry for a contract with the provided name or address
        and returns the contracts component data.
        """
        if not (bool(contract_name) ^ bool(contract_address)):
            raise ValueError(
                "Pass contract_name or contract_address, not both.")
        if bool(contract_version) and not bool(contract_name):
            raise ValueError(
                "Pass contract_version together with contract_name.")

        contracts = list()
        registry_data = self.read()

        try:
            for contract in registry_data:
                if len(contract) == 3:
                    name, address, abi = contract
                    version = None
                else:
                    name, version, address, abi = contract
                if contract_name == name and \
                        (contract_version is None or version == contract_version) or \
                        contract_address == address:
                    contracts.append((name, version, address, abi))
        except ValueError:
            message = "Missing or corrupted registry data"
            self.log.critical(message)
            raise self.InvalidRegistry(message)

        if not contracts:
            raise self.UnknownContract(contract_name)

        if contract_address and len(contracts) > 1:
            m = f"Multiple records returned for address {contract_address}"
            self.log.critical(m)
            raise self.InvalidRegistry(m)

        result = tuple(contracts) if contract_name else contracts[0]
        return result
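The id property above derives a stable identifier from the registry contents. The digest scheme in isolation (values below are illustrative):

import hashlib
import json

def registry_id(class_name, registry_data):
    # blake2b over the class name plus the JSON-serialized contents,
    # mirroring BaseContractRegistry.id above.
    blake = hashlib.blake2b()
    blake.update(class_name.encode())
    blake.update(json.dumps(registry_data).encode())
    return blake.digest().hex()

print(registry_id("LocalContractRegistry", [["Token", "v1", "0xdeadbeef", []]])[:6])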
Exemple #19
0
class SolidityCompiler:

    # TODO: Integrate with config classes

    __default_version = 'v0.4.24'
    __default_configuration_path = os.path.join(dirname(abspath(__file__)),
                                                './compiler.json')

    __default_sol_binary_path = shutil.which('solc')
    if __default_sol_binary_path is None:
        __bin_path = os.path.dirname(shutil.which('python'))  # type: str
        __default_sol_binary_path = os.path.join(__bin_path,
                                                 'solc')  # type: str

    __default_contract_dir = os.path.join(dirname(abspath(__file__)), 'source',
                                          'contracts')
    __default_chain_name = 'tester'

    def __init__(self,
                 solc_binary_path: str = None,
                 configuration_path: str = None,
                 chain_name: str = None,
                 source_dir: str = None,
                 test_contract_dir: str = None) -> None:

        self.log = Logger('solidity-compiler')
        # Compiler binary and root solidity source code directory
        self.__sol_binary_path = solc_binary_path if solc_binary_path is not None else self.__default_sol_binary_path
        self.source_dir = source_dir if source_dir is not None else self.__default_contract_dir
        self._test_solidity_source_dir = test_contract_dir

        # JSON config
        self.__configuration_path = configuration_path if configuration_path is not None else self.__default_configuration_path
        self._chain_name = chain_name if chain_name is not None else self.__default_chain_name

        # Set the local env's solidity compiler binary
        os.environ['SOLC_BINARY'] = self.__sol_binary_path

    def install_compiler(self, version: str = None):
        """
        Installs the specified solidity compiler version.
        https://github.com/ethereum/py-solc#installing-the-solc-binary
        """
        version = version if version is not None else self.__default_version
        return install_solc(version, platform=None)  # TODO: fix path

    def compile(self) -> dict:
        """Executes the compiler with parameters specified in the json config"""

        self.log.info("Using solidity compiler binary at {}".format(
            self.__sol_binary_path))
        self.log.info("Compiling solidity source files at {}".format(
            self.source_dir))

        source_paths = set()
        source_walker = os.walk(top=self.source_dir, topdown=True)
        if self._test_solidity_source_dir:
            test_source_walker = os.walk(top=self._test_solidity_source_dir,
                                         topdown=True)
            source_walker = itertools.chain(source_walker, test_source_walker)

        for root, dirs, files in source_walker:
            for filename in files:
                if filename.endswith('.sol'):
                    path = os.path.join(root, filename)
                    source_paths.add(path)
                    self.log.debug(
                        "Collecting solidity source {}".format(path))

        # Compile with remappings: https://github.com/ethereum/py-solc
        project_root = dirname(self.source_dir)

        remappings = (
            "contracts={}".format(self.source_dir),
            "zeppelin={}".format(os.path.join(project_root, 'zeppelin')),
        )

        self.log.info("Compiling with import remappings {}".format(
            ", ".join(remappings)))

        optimization_runs = 10  # TODO: Move..?
        try:
            compiled_sol = compile_files(source_files=source_paths,
                                         import_remappings=remappings,
                                         allow_paths=project_root,
                                         optimize=optimization_runs)

            self.log.info(
                "Successfully compiled {} contracts with {} optimization runs".
                format(len(compiled_sol), optimization_runs))

        except FileNotFoundError:
            raise RuntimeError(
                "The solidity compiler is not at the specified path. "
                "Check that the file exists and is executable.")
        except PermissionError:
            raise RuntimeError(
                "The solidity compiler binary at {} is not executable. "
                "Check the file's permissions.".format(self.__sol_binary_path))

        except SolcError:
            raise

        # Cleanup the compiled data keys
        interfaces = {
            name.split(':')[-1]: compiled_sol[name]
            for name in compiled_sol
        }
        return interfaces
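
For context: py-solc's compile_files returns a mapping keyed by "source_path:ContractName", which is why the final comprehension above strips everything before the colon. A minimal sketch of that cleanup step, using an invented compiler output (the contract names here are illustrative, not taken from the example):

compiled_sol = {
    'contracts/NuCypherToken.sol:NuCypherToken': {'abi': [], 'bin': '0x...'},
    'zeppelin/math/SafeMath.sol:SafeMath': {'abi': [], 'bin': '0x...'},
}

# Keep only the bare contract name as the key
interfaces = {name.split(':')[-1]: data for name, data in compiled_sol.items()}
assert set(interfaces) == {'NuCypherToken', 'SafeMath'}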
app.state = "idle"
app.iterationsToGo = 0
app.snapshot = 'No Snapshot'
app.mainSockets = []
app.modelSockets = []
app.buildSockets = []
app.models = []
app.model = ""
app.accuracy_arr = []
app.train_err_arr = []

app.logvar = LogCapture(app)
# log.startLogging(sys.stdout) # Print to actual console
# globalLogBeginner.beginLoggingTo([app.logvar], redirectStandardIO=True)
# log.startLogging(app.logvar)
log.info("wow")




@gen.coroutine
def main():
	# http_server = tornado.httpserver.HTTPServer(app, ssl_options={
	# 	"certfile": "ssl\certificate.crt",
	# 	"keyfile": "ssl\privatekey.key",
	# })	
	# http_server.listen(443)
	app.listen(80)
	tornado.ioloop.IOLoop.current().spawn_callback(consumer)
	# future = futureCreator(consumer)
	# gen.with_timeout(time.time() + 100, future)
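
Earlier in this fragment, the commented-out lines around LogCapture hint at twisted's global log beginner. As a minimal sketch (not the author's exact wiring), a file-like observer can be installed globally like this; swapping in a custom observer such as LogCapture would work the same way, assuming it is callable with a single event dict as twisted observers are:

import sys
from twisted.logger import Logger, globalLogBeginner, textFileLogObserver

# Begin global logging with a list of observers; redirectStandardIO=True
# would additionally capture print() output into the log stream.
globalLogBeginner.beginLoggingTo([textFileLogObserver(sys.stdout)],
                                 redirectStandardIO=False)

log = Logger(namespace="app")
log.info("observer wired")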
     user = yield self.factory.resolve_user(request.dn)
 except UserMappingError:
     # User could not be found
     log.info('Could not resolve {dn!r} to user', dn=request.dn)
     result = (False, 'Invalid user.')
 except RealmMappingError as e:
     # Realm could not be mapped
     log.info('Could not resolve {dn!r} to realm: {message!r}',
              dn=request.dn,
              message=str(e))
     # TODO: too much information revealed?
     result = (False, 'Could not determine realm.')
 else:
     log.info('Resolved {dn!r} to {user!r}@{realm!r} ({marker!r})',
              dn=request.dn,
              user=user,
              realm=realm,
              marker=app_marker)
     password = request.auth
     if self.factory.is_bind_cached(request.dn, app_marker,
                                    request.auth):
         log.info('Combination found in bind cache!')
         result = (True, app_marker)
     else:
         response = yield self.request_validate(
             self.factory.validate_url, user, realm, password)
         json_body = yield readBody(response)
         if response.code == 200:
             body = json.loads(json_body)
             if body['result']['status']:
                 if body['result']['value']:
Exemple #22
0
class JSONRPCStdoutEmitter(StdoutEmitter):

    transport_serializer = json.dumps
    delimiter = '\n'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.log = Logger("JSON-RPC-Emitter")

    class JSONRPCError(RuntimeError):
        code = None
        message = "Unknown JSON-RPC Error"

    class ParseError(JSONRPCError):
        code = -32700
        message = "Invalid JSON was received by the server."

    class InvalidRequest(JSONRPCError):
        code = -32600
        message = "The JSON sent is not a valid Request object."

    class MethodNotFound(JSONRPCError):
        code = -32601
        message = "The method does not exist / is not available."

    class InvalidParams(JSONRPCError):
        code = -32602
        message = "Invalid method parameter(s)."

    class InternalError(JSONRPCError):
        code = -32603
        message = "Internal JSON-RPC error."

    @staticmethod
    def assemble_response(response: dict, message_id: int) -> dict:
        response_data = {'jsonrpc': '2.0',
                         'id': str(message_id),
                         'result': response}
        return response_data

    @staticmethod
    def assemble_error(message, code, data=None) -> dict:
        response_data = {'jsonrpc': '2.0',
                         'error': {'code': str(code),
                                   'message': str(message),
                                   'data': data},
                         'id': None}  # error has no ID
        return response_data

    def __serialize(self, data: dict, delimiter=delimiter, as_bytes: bool = False) -> Union[str, bytes]:

        # Serialize
        serialized_response = JSONRPCStdoutEmitter.transport_serializer(data)   # type: str

        if as_bytes:
            serialized_response = bytes(serialized_response, encoding='utf-8')  # type: bytes

        # Add delimiter
        if delimiter:
            if as_bytes:
                delimiter = bytes(delimiter, encoding='utf-8')
            serialized_response = delimiter + serialized_response

        return serialized_response

    def __write(self, data: dict):
        """Outlet"""

        serialized_response = self.__serialize(data=data)

        # Write to stdout file descriptor
        number_of_written_bytes = self.sink(serialized_response)  # < ------ OUTLET
        return number_of_written_bytes

    def clear(self):
        pass

    def message(self, message: str, **kwds):
        pass

    def echo(self, *args, **kwds):
        pass

    def banner(self, banner):
        pass

    def ipc(self, response: dict, request_id: int, duration) -> int:
        """
        Write RPC response object to stdout and return the number of bytes written.
        """

        # Serialize JSON RPC Message
        assembled_response = self.assemble_response(response=response, message_id=request_id)
        size = self.__write(data=assembled_response)
        self.log.info(f"OK | Responded to IPC request #{request_id} with {size} bytes, took {duration}")
        return size

    def error(self, e):
        """
        Write RPC error object to stdout and return the number of bytes written.
        """
        try:
            assembled_error = self.assemble_error(message=e.message, code=e.code)
        except AttributeError:
            if not isinstance(e, self.JSONRPCError):
                self.log.info(str(e))
                raise e  # a different error was raised
            else:
                raise self.JSONRPCError

        size = self.__write(data=assembled_error)
        # self.log.info(f"Error {e.code} | {e.message}")  # TODO: Restore this log message
        return size
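
The emitter above writes newline-delimited JSON-RPC 2.0 envelopes to stdout. As a rough illustration of the resulting wire format (the shapes mirror assemble_response and assemble_error above; the id and payload values are invented):

import json

response = {'jsonrpc': '2.0', 'id': '42', 'result': {'ok': True}}
error = {'jsonrpc': '2.0',
         'error': {'code': '-32601',
                   'message': 'The method does not exist / is not available.',
                   'data': None},
         'id': None}  # errors carry a null id

# __serialize prepends the '\n' delimiter before each payload
for payload in (response, error):
    print('\n' + json.dumps(payload), end='')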
Exemple #23
0
class RuntimeCalculator:
    def __init__(self, lock, addr='localhost', port='6800'):
        config = Config()
        self.lock = lock
        self.user_name = config.get('auth_username', '')
        self.user_password = config.get('auth_password', '')
        self.clear_at_start = config.get('clear_up_database_when_start', 'yes')
        self.observation_times = int(config.get('observation_times', '20'))
        self.strict_mode = config.get('strict_mode', 'no')
        self.strict_degree = int(config.get('strict_degree',
                                            '4'))  # strict-mode degree: greater than zero; smaller is stricter
        self.db = glv.get_value(key='sqlite_db')
        self.runtime_log = Logger(namespace='- Runtime Collector -')
        self.terminator_log = Logger(namespace='- TERMINATOR -')
        self.sep_time = 1 * 60  # collection interval: 1 minute
        self.terminator_scan_sep = 20
        self.server_port = 'http://localhost:{}/'.format(port)
        self.jobs_url = self.server_port + 'listjobs.json'
        # self.jobs_url = 'http://localhost:6800/listjobs.json'

    def list_the_spiders(self, spider_list):
        dic = dict()
        if spider_list:
            for spider_dic in spider_list:
                spider_name = next(iter(spider_dic.keys()))
                runtime = int(next(iter(spider_dic.values())))
                if dic.get(spider_name):
                    dic[spider_name].append(runtime)
                else:
                    dic[spider_name] = list()
                    dic[spider_name].append(runtime)
        return dic

    def unusual_spider(self,
                       project,
                       name_of_spider,
                       runtime_of_spider,
                       save_to_database=True):
        self.lock.acquire()
        res_from_db = self.db.get(model_name='SpiderScheduleModel',
                                  key_list=['project', 'spider'],
                                  filter_dic={'status': '3'})
        data = self.db.get(model_name='SpiderMonitor',
                           key_list=['spider', 'runtime'],
                           filter_dic={'project': project})
        self.lock.release()
        top_set = {'{}-{}'.format(x.project, x.spider) for x in res_from_db}
        item = '{}-{}'.format(project, name_of_spider)
        if item in top_set:
            return -1100
        spider_list_temp = [{
            x.spider: x.runtime
        } for x in data] if data else []
        spider_dic_temp = self.list_the_spiders(spider_list_temp)
        over_time = -1000
        if spider_dic_temp:
            time_list = spider_dic_temp.get(name_of_spider)
            if time_list and len(time_list) > self.observation_times:
                std = np.std(time_list, ddof=1)
                if self.strict_mode == "yes":
                    time_list_set = set(time_list)
                    expectation = sum([
                        x * (time_list.count(x) / len(time_list))
                        for x in time_list_set
                    ])  # mathematical expectation (weighted mean)
                    over_time = runtime_of_spider - (
                        std * self.strict_degree + expectation
                    )  # strict mode: scaled sample deviation plus the expectation
                else:
                    over_time = runtime_of_spider - (
                        std + max(time_list)
                    )  # non-strict mode: deviation plus the maximum, to avoid false kills
                if over_time > 0:
                    if save_to_database:
                        self.lock.acquire()
                        unusual_spider_data = self.db.get(
                            model_name='UnormalSpider', key_list=['spider'])
                        self.lock.release()
                        unusual_spiders_set = set([
                            x.spider for x in unusual_spider_data
                        ]) if unusual_spider_data else set()
                        if name_of_spider not in unusual_spiders_set:
                            self.lock.acquire()
                            self.db.add(model=UnormalSpider,
                                        add_dic={'spider': name_of_spider})
                            self.lock.release()
        return over_time

    def save_spider_runtime(self):
        if self.clear_at_start == 'yes':
            self.lock.acquire()
            self.db.delete_data(model_name='SpiderMonitor')
            self.db.delete_data(model_name='UnormalSpider')
            self.db.delete_data(model_name='TerminatedSpider')
            self.lock.release()
            self.runtime_log.warn(
                'spider running-record database has been cleaned up')
            self.runtime_log.info('database type: sqlite')
            self.runtime_log.info('each spider observation times: {}'.format(
                self.observation_times))
            self.runtime_log.info(
                'unusual spider runtime calculation in strict mode: {}'.format(
                    self.strict_mode))
            if self.strict_mode == 'yes':
                self.runtime_log.info('strict mode value: {}'.format(
                    self.strict_degree))
        time.sleep(3)
        while True:
            self.lock.acquire()
            job_res = self.db.get(model_name='SpiderMonitor',
                                  key_list=['job_id'],
                                  return_model_map=True)
            self.lock.release()
            job_ids = set([x.job_id for x in job_res]) if job_res else set()
            save_sta = False
            for s_lis in self.runtime_monitor():
                if s_lis:
                    project, spider_name, runtime, job_id = s_lis
                    if self.unusual_spider(
                            project, spider_name, runtime,
                            save_to_database=False) <= 0:
                        filter_dic = {
                            'spider': spider_name,
                            'project': project
                        }
                        self.database_limit_ctrl(model_name='SpiderMonitor',
                                                 filter_dic=filter_dic,
                                                 limit=1000)
                        if job_id and job_id not in job_ids:
                            self.lock.acquire()
                            self.db.add(model=SpiderMonitor,
                                        add_dic={
                                            'project': project,
                                            'spider': spider_name,
                                            'runtime': runtime,
                                            'job_id': job_id
                                        })
                            self.lock.release()
                            save_sta = True
            if save_sta:
                self.runtime_log.info('spider runtime saved')
            time.sleep(self.sep_time)

    def database_limit_ctrl(self, model_name, filter_dic, limit=1000):
        self.lock.acquire(blocking=True)
        res = self.db.get(model_name=model_name,
                          key_list=['id', 'create_time'],
                          filter_dic=filter_dic)
        self.lock.release()
        if res:
            id_res = [x.id for x in res]
            id_res.sort(reverse=True)
            if len(id_res) > limit:
                if limit > 100:
                    limit = random.randint(100, limit)
                remove_ids = id_res[limit:]
                r_ids_dic_lis = [{'id': k} for k in remove_ids]
                for r_id_dic in r_ids_dic_lis:
                    self.lock.acquire()
                    self.db.delete_data(model_name=model_name,
                                        filter_dic=r_id_dic)
                    self.lock.release()
            time_res = {str(TP(y.create_time)): y.id for y in res}
            over_time = 30 * 24 * 60 * 60
            ids = [time_res.get(t) for t in time_res if int(t) > over_time]
            ids_dic_lis = [{'id': j} for j in ids]
            for id_dic in ids_dic_lis:
                self.lock.acquire()
                self.db.delete_data(model_name=model_name, filter_dic=id_dic)
                self.lock.release()

    def runtime_monitor(self, req_spider=''):
        res = requests.get(url=self.jobs_url)
        spider_list = list()
        spiders_dic = dict()
        if res:
            rank_list = json.loads(res.content).get('finished')
            if rank_list:
                for each_spider in rank_list:
                    project = each_spider.get('project')
                    spider_name = each_spider.get('spider')
                    job_id = each_spider.get('id')
                    s_time = each_spider.get('start_time').split('.')[0]
                    e_time = each_spider.get('end_time').split('.')[0]
                    runtime = TPT(s_time, e_time)
                    spider_list.append([project, spider_name, runtime, job_id])
                    if not spiders_dic.get(spider_name):
                        spiders_dic[spider_name] = list()
                        spiders_dic[spider_name].append(runtime)
                    else:
                        spiders_dic[spider_name].append(runtime)
        if req_spider and spiders_dic:
            return sum(spiders_dic.get(req_spider)) // len(
                spiders_dic.get(req_spider))
        return spider_list

    def time_format(self, strtime):
        strtime = str(strtime)

        times = [x.strip() for x in strtime.split('d')]
        if len(times) > 1:
            d = int(times[0])
            hms = times[1]
        else:
            d = 0
            hms = times[0]

        h, m, s = [int(x.strip()) for x in hms.split(":") if x and x.strip()]
        seconds = d * 24 * 60 * 60 + h * 60 * 60 + m * 60 + s
        return seconds

    def terminator(self):
        time.sleep(3)
        self.terminator_log.info('Terminator Started')
        while True:
            res = json.loads(requests.get(url=self.jobs_url).content)
            if res.get('status') == 'ok':
                running_spiders = res.get('running')
                if running_spiders:
                    kill_lis = list()
                    for running_spider in running_spiders:
                        project = running_spider.get('project')
                        spider = running_spider.get('spider')
                        job_id = running_spider.get('id')
                        PID = running_spider.get('pid')
                        start_time = running_spider.get('start_time')
                        time_passed = self.time_passed(start_time)
                        if self.unusual_spider(project=project,
                                               name_of_spider=spider,
                                               runtime_of_spider=time_passed,
                                               save_to_database=False) > 0:
                            if project and job_id:
                                term = threading.Thread(
                                    target=self.kill_spider,
                                    args=(project, job_id, spider, PID))
                                term.daemon = True
                                term.start()
                                kill_lis.append("{}-{}-{}".format(
                                    project, spider, job_id))
                            else:
                                p_name = '<project name> ' if not project else '[{}]'.format(
                                    project)
                                j_id = '<job id>' if not job_id else '[{}]'.format(
                                    job_id)
                                missing_data = p_name + j_id
                                self.terminator_log.warn(
                                    'Target info {} missing, unable to locate the target!'
                                    .format(missing_data))
                    ter_msg = 'Scan completed'
                    if kill_lis:
                        ter_msg += ', Terminated target: {}'.format(
                            str(kill_lis))
                    self.terminator_log.warn(ter_msg)
            time.sleep(self.terminator_scan_sep)

    def kill_spider(self, project, job_id, spider, PID):
        kill_url = self.server_port + 'cancel.json'
        self.terminator_log.warn('\n\n\tTarget Found! >>> {}  {} <<<\n'.format(
            spider, job_id))
        self.terminator_log.warn(
            'terminating the spider "{}" within 3 seconds'.format(spider))
        time.sleep(2)
        self.terminator_log.warn('sending terminate signal...')
        body = {"project": project, "job": job_id}
        try:
            target_killed = False
            for _ in range(2):
                res = json.loads(
                    requests.post(url=kill_url, data=body).content)
                self.terminator_log.warn(
                    'terminate signal has been sent [{}]'.format(_))
                kill_status = res.get('status')
                kill_prevstate = res.get('prevstate')
                if kill_status == 'ok' and kill_prevstate not in {
                        'running', 'pending'
                }:
                    target_killed = True
                    break
                time.sleep(0.5)
            if target_killed:
                self.terminator_log.warn(
                    'Target [ {} ] has been terminated {}\n'.format(
                        spider,
                        time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
                self.lock.acquire()
                self.db.add(model=TerminatedSpider,
                            add_dic={
                                'spider': spider,
                                'job_id': job_id
                            })
                self.lock.release()
            else:
                raise ValueError('Signal sent, but the target is still running')
        except Exception as E:
            self.terminator_log.warn(
                'something went wrong when sending the terminate signal: {}'.format(
                    E))
            self.terminator_log.warn('trying to terminate it with PID...')
            try:
                os.kill(int(PID), signal.SIGKILL)
                self.lock.acquire()
                self.db.add(model=TerminatedSpider,
                            add_dic={
                                'spider': spider,
                                'job_id': job_id
                            })
                self.lock.release()
                self.terminator_log.warn(
                    'Target [{}] has been terminated {}\n'.format(
                        spider,
                        time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
            except Exception:
                os.popen('taskkill.exe /f /pid ' + str(PID))
                self.lock.acquire()
                self.db.add(model=TerminatedSpider,
                            add_dic={
                                'spider': spider,
                                'job_id': job_id
                            })
                self.lock.release()
                self.terminator_log.warn(
                    'Target [{}] has been terminated {}\n'.format(
                        spider,
                        time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))

    def time_passed(self, date_time):
        date_time = date_time.strip()
        if len(date_time.split(' ')) < 2:
            date_time = date_time + " 00:00:00"
        last_news_date = date_time.split(" ")[0].split('-')
        last_news_time = date_time.split(" ")[1].split(':')
        for t in last_news_time:
            last_news_date.append(t)
        ls = [
            int(x) if '.' not in x else int(float(x)) for x in last_news_date
        ]
        secs = round((datetime.datetime.now() -
                      datetime.datetime(ls[0], ls[1], ls[2],
                                        ls[3], ls[4], ls[5])).total_seconds())
        return secs
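
The overtime test in unusual_spider above reduces to a threshold built from the sample standard deviation of past runtimes. A minimal sketch of both modes, with invented numbers (numpy assumed, as in the class):

import numpy as np

time_list = [60, 62, 58, 61, 59]   # invented historical runtimes, in seconds
runtime = 400                       # current run under evaluation
strict_degree = 4

std = np.std(time_list, ddof=1)
mean = sum(time_list) / len(time_list)

strict_over = runtime - (std * strict_degree + mean)  # strict: scaled deviation + mean
lenient_over = runtime - (std + max(time_list))       # lenient: deviation + max
print(strict_over > 0, lenient_over > 0)              # positive means "unusual"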
Exemple #24
0
def estimate_gas(analyzer: AnalyzeGas = None) -> None:
    """
    Execute a linear sequence of NuCypher transactions mimicking
    post-deployment usage on a local PyEVM blockchain;
    Record the resulting estimated transaction gas expenditure.

    Note: The function calls below are *order dependent*
    """

    #
    # Setup
    #

    if analyzer is None:
        analyzer = AnalyzeGas()

    log = Logger(AnalyzeGas.LOG_NAME)

    # Blockchain
    testerchain, agents = TesterBlockchain.bootstrap_network()
    web3 = testerchain.interface.w3

    # Accounts
    origin, ursula1, ursula2, ursula3, alice1, *everyone_else = testerchain.interface.w3.eth.accounts

    # Contracts
    token_agent = NucypherTokenAgent(blockchain=testerchain)
    miner_agent = MinerAgent(blockchain=testerchain)
    policy_agent = PolicyAgent(blockchain=testerchain)
    adjudicator_agent = MiningAdjudicatorAgent()

    # Contract Callers
    token_functions = token_agent.contract.functions
    miner_functions = miner_agent.contract.functions
    policy_functions = policy_agent.contract.functions
    adjudicator_functions = adjudicator_agent.contract.functions

    analyzer.start_collection()
    print("********* Estimating Gas *********")

    #
    # Pre deposit tokens
    #
    tx = token_functions.approve(miner_agent.contract_address,
                                 MIN_ALLOWED_LOCKED * 5).transact(
                                     {'from': origin})
    testerchain.wait_for_receipt(tx)
    log.info("Pre-deposit tokens for 5 owners = " + str(
        miner_functions.preDeposit(everyone_else[0:5], [MIN_ALLOWED_LOCKED] *
                                   5, [MIN_LOCKED_PERIODS] *
                                   5).estimateGas({'from': origin})))

    #
    # Give Ursula and Alice some coins
    #
    log.info("Transfer tokens = " + str(
        token_functions.transfer(ursula1, MIN_ALLOWED_LOCKED *
                                 10).estimateGas({'from': origin})))
    tx = token_functions.transfer(ursula1, MIN_ALLOWED_LOCKED * 10).transact(
        {'from': origin})
    testerchain.wait_for_receipt(tx)
    tx = token_functions.transfer(ursula2, MIN_ALLOWED_LOCKED * 10).transact(
        {'from': origin})
    testerchain.wait_for_receipt(tx)
    tx = token_functions.transfer(ursula3, MIN_ALLOWED_LOCKED * 10).transact(
        {'from': origin})
    testerchain.wait_for_receipt(tx)

    #
    # Ursula and Alice give Escrow rights to transfer
    #
    log.info("Approving transfer = " + str(
        token_functions.approve(miner_agent.contract_address,
                                MIN_ALLOWED_LOCKED *
                                6).estimateGas({'from': ursula1})))
    tx = token_functions.approve(miner_agent.contract_address,
                                 MIN_ALLOWED_LOCKED * 6).transact(
                                     {'from': ursula1})
    testerchain.wait_for_receipt(tx)
    tx = token_functions.approve(miner_agent.contract_address,
                                 MIN_ALLOWED_LOCKED * 6).transact(
                                     {'from': ursula2})
    testerchain.wait_for_receipt(tx)
    tx = token_functions.approve(miner_agent.contract_address,
                                 MIN_ALLOWED_LOCKED * 6).transact(
                                     {'from': ursula3})
    testerchain.wait_for_receipt(tx)

    #
    # Ursula and Alice transfer some tokens to the escrow and lock them
    #
    log.info("First initial deposit tokens = " + str(
        miner_functions.deposit(MIN_ALLOWED_LOCKED * 3, MIN_LOCKED_PERIODS).
        estimateGas({'from': ursula1})))
    tx = miner_functions.deposit(
        MIN_ALLOWED_LOCKED * 3, MIN_LOCKED_PERIODS).transact({'from': ursula1})
    testerchain.wait_for_receipt(tx)
    log.info("Second initial deposit tokens = " + str(
        miner_functions.deposit(MIN_ALLOWED_LOCKED * 3, MIN_LOCKED_PERIODS).
        estimateGas({'from': ursula2})))
    tx = miner_functions.deposit(
        MIN_ALLOWED_LOCKED * 3, MIN_LOCKED_PERIODS).transact({'from': ursula2})
    testerchain.wait_for_receipt(tx)
    log.info("Third initial deposit tokens = " + str(
        miner_functions.deposit(MIN_ALLOWED_LOCKED * 3, MIN_LOCKED_PERIODS).
        estimateGas({'from': ursula3})))
    tx = miner_functions.deposit(
        MIN_ALLOWED_LOCKED * 3, MIN_LOCKED_PERIODS).transact({'from': ursula3})
    testerchain.wait_for_receipt(tx)

    #
    # Wait 1 period and confirm activity
    #
    testerchain.time_travel(periods=1)
    log.info(
        "First confirm activity = " +
        str(miner_functions.confirmActivity().estimateGas({'from': ursula1})))
    tx = miner_functions.confirmActivity().transact({'from': ursula1})
    testerchain.wait_for_receipt(tx)
    log.info(
        "Second confirm activity = " +
        str(miner_functions.confirmActivity().estimateGas({'from': ursula2})))
    tx = miner_functions.confirmActivity().transact({'from': ursula2})
    testerchain.wait_for_receipt(tx)
    log.info(
        "Third confirm activity = " +
        str(miner_functions.confirmActivity().estimateGas({'from': ursula3})))
    tx = miner_functions.confirmActivity().transact({'from': ursula3})
    testerchain.wait_for_receipt(tx)

    #
    # Wait 1 period and mint tokens
    #
    testerchain.time_travel(periods=1)
    log.info("First mining (1 stake) = " +
             str(miner_functions.mint().estimateGas({'from': ursula1})))
    tx = miner_functions.mint().transact({'from': ursula1})
    testerchain.wait_for_receipt(tx)
    log.info("Second mining (1 stake) = " +
             str(miner_functions.mint().estimateGas({'from': ursula2})))
    tx = miner_functions.mint().transact({'from': ursula2})
    testerchain.wait_for_receipt(tx)
    log.info("Third/last mining (1 stake) = " +
             str(miner_functions.mint().estimateGas({'from': ursula3})))
    tx = miner_functions.mint().transact({'from': ursula3})
    testerchain.wait_for_receipt(tx)

    log.info(
        "First confirm activity again = " +
        str(miner_functions.confirmActivity().estimateGas({'from': ursula1})))
    tx = miner_functions.confirmActivity().transact({'from': ursula1})
    testerchain.wait_for_receipt(tx)
    log.info(
        "Second confirm activity again = " +
        str(miner_functions.confirmActivity().estimateGas({'from': ursula2})))
    tx = miner_functions.confirmActivity().transact({'from': ursula2})
    testerchain.wait_for_receipt(tx)
    log.info(
        "Third confirm activity again = " +
        str(miner_functions.confirmActivity().estimateGas({'from': ursula3})))
    tx = miner_functions.confirmActivity().transact({'from': ursula3})
    testerchain.wait_for_receipt(tx)

    #
    # Confirm again
    #
    testerchain.time_travel(periods=1)
    log.info(
        "First confirm activity + mint = " +
        str(miner_functions.confirmActivity().estimateGas({'from': ursula1})))
    tx = miner_functions.confirmActivity().transact({'from': ursula1})
    testerchain.wait_for_receipt(tx)
    log.info(
        "Second confirm activity + mint = " +
        str(miner_functions.confirmActivity().estimateGas({'from': ursula2})))
    tx = miner_functions.confirmActivity().transact({'from': ursula2})
    testerchain.wait_for_receipt(tx)
    log.info(
        "Third confirm activity + mint = " +
        str(miner_functions.confirmActivity().estimateGas({'from': ursula3})))
    tx = miner_functions.confirmActivity().transact({'from': ursula3})
    testerchain.wait_for_receipt(tx)

    #
    # Get locked tokens
    #
    log.info("Getting locked tokens = " +
             str(miner_functions.getLockedTokens(ursula1).estimateGas()))

    #
    # Wait 1 period and withdraw tokens
    #
    testerchain.time_travel(periods=1)
    log.info("First withdraw = " +
             str(miner_functions.withdraw(1).estimateGas({'from': ursula1})))
    tx = miner_functions.withdraw(1).transact({'from': ursula1})
    testerchain.wait_for_receipt(tx)
    log.info("Second withdraw = " +
             str(miner_functions.withdraw(1).estimateGas({'from': ursula2})))
    tx = miner_functions.withdraw(1).transact({'from': ursula2})
    testerchain.wait_for_receipt(tx)
    log.info("Third withdraw = " +
             str(miner_functions.withdraw(1).estimateGas({'from': ursula3})))
    tx = miner_functions.withdraw(1).transact({'from': ursula3})
    testerchain.wait_for_receipt(tx)

    #
    # Confirm activity with re-stake
    #
    tx = miner_functions.setReStake(True).transact({'from': ursula1})
    testerchain.wait_for_receipt(tx)
    tx = miner_functions.setReStake(True).transact({'from': ursula2})
    testerchain.wait_for_receipt(tx)
    tx = miner_functions.setReStake(True).transact({'from': ursula3})
    testerchain.wait_for_receipt(tx)

    log.info(
        "First confirm activity + mint with re-stake = " +
        str(miner_functions.confirmActivity().estimateGas({'from': ursula1})))
    tx = miner_functions.confirmActivity().transact({'from': ursula1})
    testerchain.wait_for_receipt(tx)
    log.info(
        "Second confirm activity + mint with re-stake = " +
        str(miner_functions.confirmActivity().estimateGas({'from': ursula2})))
    tx = miner_functions.confirmActivity().transact({'from': ursula2})
    testerchain.wait_for_receipt(tx)
    log.info(
        "Third confirm activity + mint with re-stake = " +
        str(miner_functions.confirmActivity().estimateGas({'from': ursula3})))
    tx = miner_functions.confirmActivity().transact({'from': ursula3})
    testerchain.wait_for_receipt(tx)

    tx = miner_functions.setReStake(False).transact({'from': ursula1})
    testerchain.wait_for_receipt(tx)
    tx = miner_functions.setReStake(False).transact({'from': ursula2})
    testerchain.wait_for_receipt(tx)
    tx = miner_functions.setReStake(False).transact({'from': ursula3})
    testerchain.wait_for_receipt(tx)

    #
    # Wait 2 periods and confirm activity after downtime
    #
    testerchain.time_travel(periods=2)
    log.info(
        "First confirm activity after downtime = " +
        str(miner_functions.confirmActivity().estimateGas({'from': ursula1})))
    tx = miner_functions.confirmActivity().transact({'from': ursula1})
    testerchain.wait_for_receipt(tx)
    log.info(
        "Second confirm activity after downtime = " +
        str(miner_functions.confirmActivity().estimateGas({'from': ursula2})))
    tx = miner_functions.confirmActivity().transact({'from': ursula2})
    testerchain.wait_for_receipt(tx)
    log.info(
        "Third confirm activity after downtime = " +
        str(miner_functions.confirmActivity().estimateGas({'from': ursula3})))
    tx = miner_functions.confirmActivity().transact({'from': ursula3})
    testerchain.wait_for_receipt(tx)

    #
    # Ursula and Alice deposit some tokens to the escrow again
    #
    log.info("First deposit tokens again = " + str(
        miner_functions.deposit(MIN_ALLOWED_LOCKED * 2, MIN_LOCKED_PERIODS).
        estimateGas({'from': ursula1})))
    tx = miner_functions.deposit(
        MIN_ALLOWED_LOCKED * 2, MIN_LOCKED_PERIODS).transact({'from': ursula1})
    testerchain.wait_for_receipt(tx)
    log.info("Second deposit tokens again = " + str(
        miner_functions.deposit(MIN_ALLOWED_LOCKED * 2, MIN_LOCKED_PERIODS).
        estimateGas({'from': ursula2})))
    tx = miner_functions.deposit(
        MIN_ALLOWED_LOCKED * 2, MIN_LOCKED_PERIODS).transact({'from': ursula2})
    testerchain.wait_for_receipt(tx)
    log.info("Third deposit tokens again = " + str(
        miner_functions.deposit(MIN_ALLOWED_LOCKED * 2, MIN_LOCKED_PERIODS).
        estimateGas({'from': ursula3})))
    tx = miner_functions.deposit(
        MIN_ALLOWED_LOCKED * 2, MIN_LOCKED_PERIODS).transact({'from': ursula3})
    testerchain.wait_for_receipt(tx)

    #
    # Wait 1 period and mint tokens
    #
    testerchain.time_travel(periods=1)
    log.info("First mining again = " +
             str(miner_functions.mint().estimateGas({'from': ursula1})))
    tx = miner_functions.mint().transact({'from': ursula1})
    testerchain.wait_for_receipt(tx)
    log.info("Second mining again = " +
             str(miner_functions.mint().estimateGas({'from': ursula2})))
    tx = miner_functions.mint().transact({'from': ursula2})
    testerchain.wait_for_receipt(tx)
    log.info("Third/last mining again = " +
             str(miner_functions.mint().estimateGas({'from': ursula3})))
    tx = miner_functions.mint().transact({'from': ursula3})
    testerchain.wait_for_receipt(tx)

    #
    # Create policy
    #
    policy_id_1 = os.urandom(int(Policy.POLICY_ID_LENGTH))
    policy_id_2 = os.urandom(int(Policy.POLICY_ID_LENGTH))
    number_of_periods = 10
    log.info("First creating policy (1 node, 10 periods) = " + str(
        policy_functions.createPolicy(policy_id_1, number_of_periods, 0,
                                      [ursula1]).estimateGas({
                                          'from': alice1,
                                          'value': 10000
                                      })))
    tx = policy_functions.createPolicy(policy_id_1, number_of_periods, 0,
                                       [ursula1]).transact({
                                           'from': alice1,
                                           'value': 10000
                                       })
    testerchain.wait_for_receipt(tx)
    log.info("Second creating policy (1 node, 10 periods) = " + str(
        policy_functions.createPolicy(policy_id_2, number_of_periods, 0,
                                      [ursula1]).estimateGas({
                                          'from': alice1,
                                          'value': 10000
                                      })))
    tx = policy_functions.createPolicy(policy_id_2, number_of_periods, 0,
                                       [ursula1]).transact({
                                           'from': alice1,
                                           'value': 10000
                                       })
    testerchain.wait_for_receipt(tx)

    #
    # Revoke policy
    #
    log.info("Revoking policy = " + str(
        policy_functions.revokePolicy(policy_id_1).estimateGas(
            {'from': alice1})))
    tx = policy_functions.revokePolicy(policy_id_1).transact({'from': alice1})
    testerchain.wait_for_receipt(tx)
    tx = policy_functions.revokePolicy(policy_id_2).transact({'from': alice1})
    testerchain.wait_for_receipt(tx)

    #
    # Create policy with more periods
    #
    policy_id_1 = os.urandom(int(Policy.POLICY_ID_LENGTH))
    policy_id_2 = os.urandom(int(Policy.POLICY_ID_LENGTH))
    policy_id_3 = os.urandom(int(Policy.POLICY_ID_LENGTH))
    number_of_periods = 100
    log.info(
        "First creating policy (1 node, " + str(number_of_periods) +
        " periods, first reward) = " + str(
            policy_functions.createPolicy(policy_id_1, number_of_periods, 50,
                                          [ursula2]).estimateGas({
                                              'from': alice1,
                                              'value': 10050
                                          })))
    tx = policy_functions.createPolicy(policy_id_1, number_of_periods, 50,
                                       [ursula2]).transact({
                                           'from': alice1,
                                           'value': 10050
                                       })
    testerchain.wait_for_receipt(tx)
    testerchain.time_travel(periods=1)
    log.info(
        "Second creating policy (1 node, " + str(number_of_periods) +
        " periods, first reward) = " + str(
            policy_functions.createPolicy(policy_id_2, number_of_periods, 50,
                                          [ursula2]).estimateGas({
                                              'from': alice1,
                                              'value': 10050
                                          })))
    tx = policy_functions.createPolicy(policy_id_2, number_of_periods, 50,
                                       [ursula2]).transact({
                                           'from': alice1,
                                           'value': 10050
                                       })
    testerchain.wait_for_receipt(tx)
    log.info(
        "Third creating policy (1 node, " + str(number_of_periods) +
        " periods, first reward) = " + str(
            policy_functions.createPolicy(policy_id_3, number_of_periods, 50,
                                          [ursula1]).estimateGas({
                                              'from': alice1,
                                              'value': 10050
                                          })))
    tx = policy_functions.createPolicy(policy_id_3, number_of_periods, 50,
                                       [ursula1]).transact({
                                           'from': alice1,
                                           'value': 10050
                                       })
    testerchain.wait_for_receipt(tx)

    #
    # Mine and revoke policy
    #
    testerchain.time_travel(periods=10)
    tx = miner_functions.confirmActivity().transact({'from': ursula2})
    testerchain.wait_for_receipt(tx)
    tx = miner_functions.confirmActivity().transact({'from': ursula1})
    testerchain.wait_for_receipt(tx)

    testerchain.time_travel(periods=1)
    log.info("First mining after downtime = " +
             str(miner_functions.mint().estimateGas({'from': ursula1})))
    tx = miner_functions.mint().transact({'from': ursula1})
    testerchain.wait_for_receipt(tx)
    log.info("Second mining after downtime = " +
             str(miner_functions.mint().estimateGas({'from': ursula2})))
    tx = miner_functions.mint().transact({'from': ursula2})
    testerchain.wait_for_receipt(tx)

    testerchain.time_travel(periods=10)
    log.info("First revoking policy after downtime = " + str(
        policy_functions.revokePolicy(policy_id_1).estimateGas(
            {'from': alice1})))
    tx = policy_functions.revokePolicy(policy_id_1).transact({'from': alice1})
    testerchain.wait_for_receipt(tx)
    log.info("Second revoking policy after downtime = " + str(
        policy_functions.revokePolicy(policy_id_2).estimateGas(
            {'from': alice1})))
    tx = policy_functions.revokePolicy(policy_id_2).transact({'from': alice1})
    testerchain.wait_for_receipt(tx)
    log.info("Third revoking policy after downtime = " + str(
        policy_functions.revokePolicy(policy_id_3).estimateGas(
            {'from': alice1})))
    tx = policy_functions.revokePolicy(policy_id_3).transact({'from': alice1})
    testerchain.wait_for_receipt(tx)

    #
    # Create policy with multiple nodes
    #
    policy_id_1 = os.urandom(int(Policy.POLICY_ID_LENGTH))
    policy_id_2 = os.urandom(int(Policy.POLICY_ID_LENGTH))
    policy_id_3 = os.urandom(int(Policy.POLICY_ID_LENGTH))
    number_of_periods = 100
    log.info("First creating policy (3 nodes, 100 periods, first reward) = " +
             str(
                 policy_functions.createPolicy(
                     policy_id_1, number_of_periods, 50,
                     [ursula1, ursula2, ursula3]).estimateGas({
                         'from': alice1,
                         'value': 30150
                     })))
    tx = policy_functions.createPolicy(policy_id_1, number_of_periods, 50,
                                       [ursula1, ursula2, ursula3]).transact({
                                           'from':
                                           alice1,
                                           'value':
                                           30150
                                       })
    testerchain.wait_for_receipt(tx)
    log.info("Second creating policy (3 nodes, 100 periods, first reward) = " +
             str(
                 policy_functions.createPolicy(
                     policy_id_2, number_of_periods, 50,
                     [ursula1, ursula2, ursula3]).estimateGas({
                         'from': alice1,
                         'value': 30150
                     })))
    tx = policy_functions.createPolicy(policy_id_2, number_of_periods, 50,
                                       [ursula1, ursula2, ursula3]).transact({
                                           'from':
                                           alice1,
                                           'value':
                                           30150
                                       })
    testerchain.wait_for_receipt(tx)
    log.info("Third creating policy (2 nodes, 100 periods, first reward) = " +
             str(
                 policy_functions.createPolicy(
                     policy_id_3, number_of_periods, 50,
                     [ursula1, ursula2]).estimateGas({
                         'from': alice1,
                         'value': 20100
                     })))
    tx = policy_functions.createPolicy(policy_id_3, number_of_periods, 50,
                                       [ursula1, ursula2]).transact({
                                           'from':
                                           alice1,
                                           'value':
                                           20100
                                       })
    testerchain.wait_for_receipt(tx)

    for index in range(5):
        tx = miner_functions.confirmActivity().transact({'from': ursula1})
        testerchain.wait_for_receipt(tx)
        tx = miner_functions.confirmActivity().transact({'from': ursula2})
        testerchain.wait_for_receipt(tx)
        tx = miner_functions.confirmActivity().transact({'from': ursula3})
        testerchain.wait_for_receipt(tx)
        testerchain.time_travel(periods=1)

    tx = miner_functions.mint().transact({'from': ursula1})
    testerchain.wait_for_receipt(tx)
    tx = miner_functions.mint().transact({'from': ursula2})
    testerchain.wait_for_receipt(tx)
    tx = miner_functions.mint().transact({'from': ursula3})
    testerchain.wait_for_receipt(tx)

    #
    # Check regular deposit
    #
    log.info("First deposit tokens = " + str(
        miner_functions.deposit(MIN_ALLOWED_LOCKED, MIN_LOCKED_PERIODS).
        estimateGas({'from': ursula1})))
    tx = miner_functions.deposit(
        MIN_ALLOWED_LOCKED, MIN_LOCKED_PERIODS).transact({'from': ursula1})
    testerchain.wait_for_receipt(tx)
    log.info("Second deposit tokens = " + str(
        miner_functions.deposit(MIN_ALLOWED_LOCKED, MIN_LOCKED_PERIODS).
        estimateGas({'from': ursula2})))
    tx = miner_functions.deposit(
        MIN_ALLOWED_LOCKED, MIN_LOCKED_PERIODS).transact({'from': ursula2})
    testerchain.wait_for_receipt(tx)
    log.info("Third deposit tokens = " + str(
        miner_functions.deposit(MIN_ALLOWED_LOCKED, MIN_LOCKED_PERIODS).
        estimateGas({'from': ursula3})))
    tx = miner_functions.deposit(
        MIN_ALLOWED_LOCKED, MIN_LOCKED_PERIODS).transact({'from': ursula3})
    testerchain.wait_for_receipt(tx)

    #
    # ApproveAndCall
    #
    testerchain.time_travel(periods=1)

    tx = miner_functions.mint().transact({'from': ursula1})
    testerchain.wait_for_receipt(tx)
    tx = miner_functions.mint().transact({'from': ursula2})
    testerchain.wait_for_receipt(tx)
    tx = miner_functions.mint().transact({'from': ursula3})
    testerchain.wait_for_receipt(tx)

    log.info("First approveAndCall = " + str(
        token_functions.approveAndCall(
            miner_agent.contract_address, MIN_ALLOWED_LOCKED * 2,
            web3.toBytes(MIN_LOCKED_PERIODS)).estimateGas({'from': ursula1})))
    tx = token_functions.approveAndCall(
        miner_agent.contract_address, MIN_ALLOWED_LOCKED * 2,
        web3.toBytes(MIN_LOCKED_PERIODS)).transact({'from': ursula1})
    testerchain.wait_for_receipt(tx)
    log.info("Second approveAndCall = " + str(
        token_functions.approveAndCall(
            miner_agent.contract_address, MIN_ALLOWED_LOCKED * 2,
            web3.toBytes(MIN_LOCKED_PERIODS)).estimateGas({'from': ursula2})))
    tx = token_functions.approveAndCall(
        miner_agent.contract_address, MIN_ALLOWED_LOCKED * 2,
        web3.toBytes(MIN_LOCKED_PERIODS)).transact({'from': ursula2})
    testerchain.wait_for_receipt(tx)
    log.info("Third approveAndCall = " + str(
        token_functions.approveAndCall(
            miner_agent.contract_address, MIN_ALLOWED_LOCKED * 2,
            web3.toBytes(MIN_LOCKED_PERIODS)).estimateGas({'from': ursula3})))
    tx = token_functions.approveAndCall(
        miner_agent.contract_address, MIN_ALLOWED_LOCKED * 2,
        web3.toBytes(MIN_LOCKED_PERIODS)).transact({'from': ursula3})
    testerchain.wait_for_receipt(tx)

    #
    # Locking tokens
    #
    testerchain.time_travel(periods=1)

    tx = miner_functions.confirmActivity().transact({'from': ursula1})
    testerchain.wait_for_receipt(tx)
    tx = miner_functions.confirmActivity().transact({'from': ursula2})
    testerchain.wait_for_receipt(tx)
    tx = miner_functions.confirmActivity().transact({'from': ursula3})
    testerchain.wait_for_receipt(tx)

    log.info("First locking tokens = " + str(
        miner_functions.lock(MIN_ALLOWED_LOCKED, MIN_LOCKED_PERIODS).
        estimateGas({'from': ursula1})))
    tx = miner_functions.lock(MIN_ALLOWED_LOCKED,
                              MIN_LOCKED_PERIODS).transact({'from': ursula1})
    testerchain.wait_for_receipt(tx)
    log.info("Second locking tokens = " + str(
        miner_functions.lock(MIN_ALLOWED_LOCKED, MIN_LOCKED_PERIODS).
        estimateGas({'from': ursula2})))
    tx = miner_functions.lock(MIN_ALLOWED_LOCKED,
                              MIN_LOCKED_PERIODS).transact({'from': ursula2})
    testerchain.wait_for_receipt(tx)
    log.info("Third locking tokens = " + str(
        miner_functions.lock(MIN_ALLOWED_LOCKED, MIN_LOCKED_PERIODS).
        estimateGas({'from': ursula3})))
    tx = miner_functions.lock(MIN_ALLOWED_LOCKED,
                              MIN_LOCKED_PERIODS).transact({'from': ursula3})
    testerchain.wait_for_receipt(tx)

    #
    # Divide stake
    #
    log.info("First divide stake = " + str(
        miner_functions.divideStake(1, MIN_ALLOWED_LOCKED, 2).estimateGas(
            {'from': ursula1})))
    tx = miner_functions.divideStake(1, MIN_ALLOWED_LOCKED,
                                     2).transact({'from': ursula1})
    testerchain.wait_for_receipt(tx)
    log.info("Second divide stake = " + str(
        miner_functions.divideStake(3, MIN_ALLOWED_LOCKED, 2).estimateGas(
            {'from': ursula1})))
    tx = miner_functions.divideStake(3, MIN_ALLOWED_LOCKED,
                                     2).transact({'from': ursula1})
    testerchain.wait_for_receipt(tx)

    #
    # Divide almost finished stake
    #
    testerchain.time_travel(periods=1)
    tx = miner_functions.confirmActivity().transact({'from': ursula1})
    testerchain.wait_for_receipt(tx)
    testerchain.time_travel(periods=1)
    log.info("Divide stake (next period is not confirmed) = " + str(
        miner_functions.divideStake(0, MIN_ALLOWED_LOCKED, 2).estimateGas(
            {'from': ursula1})))
    tx = miner_functions.confirmActivity().transact({'from': ursula1})
    testerchain.wait_for_receipt(tx)
    log.info("Divide stake (next period is confirmed) = " + str(
        miner_functions.divideStake(0, MIN_ALLOWED_LOCKED, 2).estimateGas(
            {'from': ursula1})))

    #
    # Slashing tests
    #
    tx = miner_functions.confirmActivity().transact({'from': ursula1})
    testerchain.wait_for_receipt(tx)
    testerchain.time_travel(periods=1)
    # Deploy adjudicator to estimate slashing method in MinersEscrow contract
    adjudicator, _ = testerchain.interface.deploy_contract(
        'MiningAdjudicator', miner_agent.contract.address, ALGORITHM_SHA256,
        MIN_ALLOWED_LOCKED - 1, 0, 2, 2)
    tx = miner_functions.setMiningAdjudicator(adjudicator.address).transact()
    testerchain.wait_for_receipt(tx)
    adjudicator_functions = adjudicator.functions

    #
    # Slashing
    #
    slashing_args = generate_args_for_slashing(testerchain, ursula1)
    log.info("Slash just value = " + str(
        adjudicator_functions.evaluateCFrag(
            *slashing_args).estimateGas({'from': alice1})))
    tx = adjudicator_functions.evaluateCFrag(*slashing_args).transact(
        {'from': alice1})
    testerchain.wait_for_receipt(tx)

    deposit = miner_functions.minerInfo(ursula1).call()[0]
    unlocked = deposit - miner_functions.getLockedTokens(ursula1).call()
    tx = miner_functions.withdraw(unlocked).transact({'from': ursula1})
    testerchain.wait_for_receipt(tx)

    sub_stakes_length = str(miner_functions.getSubStakesLength(ursula1).call())
    slashing_args = generate_args_for_slashing(testerchain, ursula1)
    log.info("First slashing one sub stake and saving old one (" +
             sub_stakes_length + " sub stakes) = " + str(
                 adjudicator_functions.evaluateCFrag(
                     *slashing_args).estimateGas({'from': alice1})))
    tx = adjudicator_functions.evaluateCFrag(*slashing_args).transact(
        {'from': alice1})
    testerchain.wait_for_receipt(tx)

    sub_stakes_length = str(miner_functions.getSubStakesLength(ursula1).call())
    slashing_args = generate_args_for_slashing(testerchain, ursula1)
    log.info("Second slashing one sub stake and saving old one (" +
             sub_stakes_length + " sub stakes) = " + str(
                 adjudicator_functions.evaluateCFrag(
                     *slashing_args).estimateGas({'from': alice1})))
    tx = adjudicator_functions.evaluateCFrag(*slashing_args).transact(
        {'from': alice1})
    testerchain.wait_for_receipt(tx)

    sub_stakes_length = str(miner_functions.getSubStakesLength(ursula1).call())
    slashing_args = generate_args_for_slashing(testerchain, ursula1)
    log.info("Third slashing one sub stake and saving old one (" +
             sub_stakes_length + " sub stakes) = " + str(
                 adjudicator_functions.evaluateCFrag(
                     *slashing_args).estimateGas({'from': alice1})))
    tx = adjudicator_functions.evaluateCFrag(*slashing_args).transact(
        {'from': alice1})
    testerchain.wait_for_receipt(tx)

    sub_stakes_length = str(miner_functions.getSubStakesLength(ursula1).call())
    slashing_args = generate_args_for_slashing(testerchain, ursula1)
    log.info("Slashing two sub stakes and saving old one (" +
             sub_stakes_length + " sub stakes) = " + str(
                 adjudicator_functions.evaluateCFrag(
                     *slashing_args).estimateGas({'from': alice1})))
    tx = adjudicator_functions.evaluateCFrag(*slashing_args).transact(
        {'from': alice1})
    testerchain.wait_for_receipt(tx)

    for index in range(18):
        tx = miner_functions.confirmActivity().transact({'from': ursula1})
        testerchain.wait_for_receipt(tx)
        testerchain.time_travel(periods=1)

    tx = miner_functions.lock(MIN_ALLOWED_LOCKED,
                              MIN_LOCKED_PERIODS).transact({'from': ursula1})
    testerchain.wait_for_receipt(tx)
    deposit = miner_functions.minerInfo(ursula1).call()[0]
    unlocked = deposit - miner_functions.getLockedTokens(ursula1, 1).call()
    tx = miner_functions.withdraw(unlocked).transact({'from': ursula1})
    testerchain.wait_for_receipt(tx)

    sub_stakes_length = str(miner_functions.getSubStakesLength(ursula1).call())
    slashing_args = generate_args_for_slashing(testerchain, ursula1)
    log.info("Slashing two sub stakes, shortest and new one (" +
             sub_stakes_length + " sub stakes) = " + str(
                 adjudicator_functions.evaluateCFrag(
                     *slashing_args).estimateGas({'from': alice1})))
    tx = adjudicator_functions.evaluateCFrag(*slashing_args).transact(
        {'from': alice1})
    testerchain.wait_for_receipt(tx)

    sub_stakes_length = str(miner_functions.getSubStakesLength(ursula1).call())
    slashing_args = generate_args_for_slashing(testerchain, ursula1)
    log.info("Slashing three sub stakes, two shortest and new one (" +
             sub_stakes_length + " sub stakes) = " + str(
                 adjudicator_functions.evaluateCFrag(
                     *slashing_args).estimateGas({'from': alice1})))
    tx = adjudicator_functions.evaluateCFrag(*slashing_args).transact(
        {'from': alice1})
    testerchain.wait_for_receipt(tx)

    slashing_args = generate_args_for_slashing(testerchain,
                                               ursula1,
                                               corrupt=False)
    log.info("Evaluating correct CFrag = " + str(
        adjudicator_functions.evaluateCFrag(
            *slashing_args).estimateGas({'from': alice1})))
    tx = adjudicator_functions.evaluateCFrag(*slashing_args).transact(
        {'from': alice1})
    testerchain.wait_for_receipt(tx)

    print("********* All Done! *********")
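
Every measurement in estimate_gas follows the same two-step pattern: call estimateGas on a bound contract function to record the estimate, then send the identical transaction and wait for its receipt so the chain state advances for the next step. A hypothetical helper condensing that pattern (the names reuse the fixtures defined above; this is a sketch, not part of the original example):

def measure(log, label, bound_call, sender, testerchain):
    # Estimate gas for the bound call without sending it
    log.info(label + " = " + str(bound_call.estimateGas({'from': sender})))
    # Send the same call for real and block until it is mined
    tx = bound_call.transact({'from': sender})
    testerchain.wait_for_receipt(tx)

# e.g. measure(log, "First mining (1 stake)", miner_functions.mint(), ursula1, testerchain)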
Exemple #25
0
class BlockchainInterface:
    """
    Interacts with a solidity compiler and a registry in order to instantiate compiled
    ethereum contracts with the given web3 provider backend.
    """

    TIMEOUT = 600  # seconds

    DEFAULT_GAS_STRATEGY = 'medium'
    GAS_STRATEGIES = {
        'glacial': time_based.glacial_gas_price_strategy,  # 24h
        'slow': time_based.slow_gas_price_strategy,  # 1h
        'medium': time_based.medium_gas_price_strategy,  # 5m
        'fast': time_based.fast_gas_price_strategy  # 60s
    }

    process = NO_PROVIDER_PROCESS.bool_value(False)
    Web3 = Web3

    _contract_factory = VersionedContract

    class InterfaceError(Exception):
        pass

    class NoProvider(InterfaceError):
        pass

    class UnsupportedProvider(InterfaceError):
        pass

    class ConnectionFailed(InterfaceError):
        pass

    class UnknownContract(InterfaceError):
        pass

    class NotEnoughConfirmations(InterfaceError):
        pass

    REASONS = {
        INSUFFICIENT_ETH: 'insufficient funds for gas * price + value',
    }

    class TransactionFailed(InterfaceError):

        IPC_CODE = -32000  # (geth)

        def __init__(self, message: str, transaction_dict: dict,
                     contract_function: Union[ContractFunction,
                                              ContractConstructor], *args):

            self.base_message = message
            self.name = get_transaction_name(
                contract_function=contract_function)
            self.payload = transaction_dict
            self.contract_function = contract_function
            self.failures = {
                BlockchainInterface.REASONS[INSUFFICIENT_ETH]:
                self.insufficient_eth
            }
            self.message = self.failures.get(self.base_message, self.default)
            super().__init__(self.message, *args)

        @property
        def default(self) -> str:
            message = f'{self.name} from {self.payload["from"][:6]} - {self.base_message}'
            return message

        @property
        def insufficient_eth(self) -> str:
            gas = (self.payload.get('gas', 1) * self.payload['gasPrice']
                   )  # FIXME: If gas is not included...
            cost = gas + self.payload.get('value', 0)
            blockchain = BlockchainInterfaceFactory.get_interface()
            balance = blockchain.client.get_balance(
                account=self.payload['from'])
            message = f'{self.payload} from {self.payload["from"][:8]} - {self.base_message}. ' \
                      f'Calculated cost is {cost} but sender only has {balance}.'
            return message

    def __init__(
            self,
            emitter=None,  # TODO # 1754
            poa: bool = None,
            light: bool = False,
            provider_process=NO_PROVIDER_PROCESS,
            provider_uri: str = NO_BLOCKCHAIN_CONNECTION,
            provider: Web3Providers = NO_BLOCKCHAIN_CONNECTION,
            gas_strategy: Union[str, Callable] = DEFAULT_GAS_STRATEGY):
        """
        A blockchain "network interface"; The circumflex wraps entirely around the bounds of
        contract operations including compilation, deployment, and execution.

        TODO: #1502 - Move to API docs.

         Filesystem          Configuration           Node              Client                  EVM
        ================ ====================== =============== =====================  ===========================

         Solidity Files -- SolidityCompiler -                      --- HTTPProvider ------ ...
                                            |                    |
                                            |                    |
                                            |                    |
                                            - *BlockchainInterface* -- IPCProvider ----- External EVM (geth, parity...)
                                                       |         |
                                                       |         |
                                                 TestProvider ----- EthereumTester -------------
                                                                                                |
                                                                                                |
                                                                                        PyEVM (Development Chain)

         ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

         Runtime Files --                 --BlockchainInterface ----> Registry
                        |                |             ^
                        |                |             |
                        |                |             |
         Key Files ------ CharacterConfiguration     Agent                          ... (Contract API)
                        |                |             ^
                        |                |             |
                        |                |             |
                        |                |           Actor                          ... (Blockchain-Character API)
                        |                |             ^
                        |                |             |
                        |                |             |
         Config File ---                  --------- Character                       ... (Public API)
                                                       ^
                                                       |
                                                     Human


        The Blockchain is the junction of the solidity compiler, a contract registry, and a collection of
        web3 network providers as a means of interfacing with the ethereum blockchain to execute
        or deploy contract code on the network.


        Compiler and Registry Usage
        -----------------------------

        Contracts are freshly re-compiled if an instance of SolidityCompiler is passed; otherwise,
        the registry reads contract data saved to disk, which is used to retrieve contract
        addresses and op-codes. Optionally, a registry instance can be passed instead.


        Provider Usage
        ---------------
        https://github.com/ethereum/eth-tester#available-backends


        * HTTP Provider - Web3 HTTP provider, typically JSON RPC 2.0 over HTTP
        * Websocket Provider - Web3 WS provider, typically JSON RPC 2.0 over WS, supply endpoint uri and websocket=True
        * IPC Provider - Web3 File based IPC provider transported over standard I/O
        * Custom Provider - A pre-initialized web3.py provider instance to attach to this interface

        """

        self.log = Logger('Blockchain')
        self.poa = poa
        self.provider_uri = provider_uri
        self._provider = provider
        self._provider_process = provider_process
        self.w3 = NO_BLOCKCHAIN_CONNECTION
        self.client = NO_BLOCKCHAIN_CONNECTION  # type: EthereumClient
        self.transacting_power = READ_ONLY_INTERFACE
        self.is_light = light
        self.gas_strategy = self.get_gas_strategy(gas_strategy)

    def __repr__(self):
        r = '{name}({uri})'.format(name=self.__class__.__name__,
                                   uri=self.provider_uri)
        return r

    @classmethod
    def from_dict(cls, payload: dict, **overrides) -> 'BlockchainInterface':
        payload.update({k: v for k, v in overrides.items() if v is not None})
        blockchain = cls(**payload)
        return blockchain

    def to_dict(self) -> dict:
        payload = dict(provider_uri=self.provider_uri,
                       poa=self.poa,
                       light=self.is_light)
        return payload

    @property
    def is_connected(self) -> bool:
        """
        https://web3py.readthedocs.io/en/stable/__provider.html#examples-using-automated-detection
        """
        if self.client is NO_BLOCKCHAIN_CONNECTION:
            return False
        return self.client.is_connected

    @classmethod
    def get_gas_strategy(cls, gas_strategy: Union[str, Callable]) -> Callable:
        try:
            gas_strategy = cls.GAS_STRATEGIES[gas_strategy]
        except KeyError:
            if not gas_strategy:
                gas_strategy = cls.GAS_STRATEGIES[cls.DEFAULT_GAS_STRATEGY]
            elif not callable(gas_strategy):
                raise ValueError(
                    f"{gas_strategy} must be callable to be a valid gas strategy."
                )
            # else: a user-supplied callable strategy is kept as-is
        return gas_strategy
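    # e.g. get_gas_strategy('fast') resolves to the web3 time-based strategy,
    # while get_gas_strategy(custom_fn) returns the caller's callable unchanged
    # ('custom_fn' being any hypothetical user-supplied strategy function).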

    def attach_middleware(self):
        if self.poa is None:  # If POA is not set explicitly, try to autodetect from chain id
            chain_id = int(self.client.chain_id)
            self.poa = chain_id in POA_CHAINS
            self.log.debug(
                f'Autodetecting POA chain ({self.client.chain_name})')

        # For use with Proof-Of-Authority test-blockchains
        if self.poa is True:
            self.log.debug('Injecting POA middleware at layer 0')
            self.client.inject_middleware(geth_poa_middleware, layer=0)

        # Gas Price Strategy
        self.client.w3.eth.setGasPriceStrategy(self.gas_strategy)
        self.client.w3.middleware_onion.add(
            middleware.time_based_cache_middleware)
        self.client.w3.middleware_onion.add(
            middleware.latest_block_based_cache_middleware)
        self.client.w3.middleware_onion.add(middleware.simple_cache_middleware)
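        # The three cache middlewares above come from web3.middleware and
        # memoize responses to a whitelist of RPC methods (by time, by latest
        # block, and unconditionally) to avoid redundant calls while polling.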

    def connect(self):

        # Spawn child process
        if self._provider_process:
            self._provider_process.start()
            provider_uri = self._provider_process.provider_uri(scheme='file')
        else:
            provider_uri = self.provider_uri
            self.log.info(
                f"Using external Web3 Provider '{self.provider_uri}'")

        # Attach Provider
        self._attach_provider(provider=self._provider,
                              provider_uri=provider_uri)
        self.log.info("Connecting to {}".format(self.provider_uri))
        if self._provider is NO_BLOCKCHAIN_CONNECTION:
            raise self.NoProvider(
                "There are no configured blockchain providers")

        # Connect if not connected
        try:
            self.w3 = self.Web3(provider=self._provider)
            self.client = EthereumClient.from_w3(w3=self.w3)
        except requests.ConnectionError:  # RPC
            raise self.ConnectionFailed(
                f'Connection Failed - {str(self.provider_uri)} - is RPC enabled?'
            )
        except FileNotFoundError:  # IPC File Protocol
            raise self.ConnectionFailed(
                f'Connection Failed - {str(self.provider_uri)} - is IPC enabled?'
            )
        else:
            self.attach_middleware()

        return self.is_connected

    def sync(self, emitter=None) -> None:

        sync_state = self.client.sync()
        if emitter is not None:

            emitter.echo(
                f"Syncing: {self.client.chain_name.capitalize()}. Waiting for sync to begin.",
                verbosity=1)

            while not len(self.client.peers):
                emitter.echo("waiting for peers...", verbosity=1)
                time.sleep(5)

            peer_count = len(self.client.peers)
            emitter.echo(
                f"Found {'an' if peer_count == 1 else peer_count} Ethereum peer{('s' if peer_count > 1 else '')}.",
                verbosity=1)

            try:
                emitter.echo("Beginning sync...", verbosity=1)
                initial_state = next(sync_state)
            except StopIteration:  # will occur if no syncing needs to happen
                emitter.echo("Local blockchain data is already synced.",
                             verbosity=1)
                return

            prior_state = initial_state
            total_blocks_to_sync = int(initial_state.get(
                'highestBlock', 0)) - int(initial_state.get('currentBlock', 0))
            with click.progressbar(
                    length=total_blocks_to_sync,
                    label="sync progress",
                    file=emitter.get_stream(verbosity=1)) as bar:
                for syncdata in sync_state:
                    if syncdata:
                        blocks_accomplished = int(
                            syncdata['currentBlock']) - int(
                                prior_state.get('currentBlock', 0))
                        bar.update(blocks_accomplished)
                        prior_state = syncdata
        else:
            try:
                for syncdata in sync_state:
                    self.client.log.info(
                        f"Syncing {syncdata['currentBlock']}/{syncdata['highestBlock']}"
                    )
            except TypeError:  # it's already synced
                return
        return

    @property
    def provider(self) -> Union[IPCProvider, WebsocketProvider, HTTPProvider]:
        return self._provider

    def _attach_provider(self,
                         provider: Web3Providers = None,
                         provider_uri: str = None) -> None:
        """
        https://web3py.readthedocs.io/en/latest/providers.html#providers
        """

        if not provider_uri and not provider:
            raise self.NoProvider("No URI or provider instances supplied.")

        if provider_uri and not provider:
            uri_breakdown = urlparse(provider_uri)

            if uri_breakdown.scheme == 'tester':
                providers = {
                    'pyevm': _get_tester_pyevm,
                    'geth': _get_test_geth_parity_provider,
                    'parity-ethereum': _get_test_geth_parity_provider,
                }
                provider_scheme = uri_breakdown.netloc

            else:
                providers = {
                    'auto': _get_auto_provider,
                    'infura': _get_infura_provider,
                    'ipc': _get_IPC_provider,
                    'file': _get_IPC_provider,
                    'ws': _get_websocket_provider,
                    'http': _get_HTTP_provider,
                    'https': _get_HTTP_provider,
                }
                provider_scheme = uri_breakdown.scheme

            # auto-detect for file based ipc
            if not provider_scheme:
                if os.path.exists(provider_uri):
                    # file is available - assume ipc/file scheme
                    provider_scheme = 'file'
                    self.log.info(
                        f"Auto-detected provider scheme as 'file://' for provider {provider_uri}"
                    )

            try:
                self._provider = providers[provider_scheme](provider_uri)
            except KeyError:
                raise self.UnsupportedProvider(
                    f"{provider_uri} is an invalid or unsupported blockchain provider URI"
                )
            else:
                self.provider_uri = provider_uri or NO_BLOCKCHAIN_CONNECTION
        else:
            self._provider = provider

    def __transaction_failed(
        self, exception: Exception, transaction_dict: dict,
        contract_function: Union[ContractFunction,
                                 ContractConstructor]) -> None:
        """
        Re-raising error handler and context manager for transaction broadcast or
        build failure events at the interface layer. This method is a last line of defense
        against unhandled exceptions caused by transaction failures and must raise an exception.
        # TODO: #1504 - Additional Handling of validation failures (gas limits, invalid fields, etc.)
        """

        try:
            # Assume this error is formatted as an IPC response
            code, message = exception.args[0].values()

        except (ValueError, IndexError, AttributeError):
            # TODO: #1504 - Try even harder to determine if this is insufficient funds causing the issue,
            #               This may be best handled at the agent or actor layer for registry and token interactions.
            # Worst case scenario - raise the exception held in context implicitly
            raise exception

        else:
            if int(code) != self.TransactionFailed.IPC_CODE:
                # Only handle client-specific exceptions
                # https://www.jsonrpc.org/specification Section 5.1
                raise exception
            self.log.critical(message)  # simple context
            raise self.TransactionFailed(
                message=message,  # rich error (best case)
                contract_function=contract_function,
                transaction_dict=transaction_dict)

    def __log_transaction(self, transaction_dict: dict,
                          contract_function: ContractFunction):
        """
        Format and log a transaction dict and return the transaction name string.
        This method *must not* mutate the original transaction dict.
        """
        # Do not mutate the original transaction dict
        tx = dict(transaction_dict).copy()

        # Format
        if tx.get('to'):
            tx['to'] = to_checksum_address(contract_function.address)
        tx['from'] = to_checksum_address(tx['from'])
        tx.update({
            f: prettify_eth_amount(v)
            for f, v in tx.items() if f in ('gasPrice', 'value')
        })
        payload_pprint = ', '.join("{}: {}".format(k, v)
                                   for k, v in tx.items())

        # Log
        transaction_name = get_transaction_name(
            contract_function=contract_function)
        self.log.debug(f"[TX-{transaction_name}] | {payload_pprint}")

    @validate_checksum_address
    def build_transaction(
        self,
        contract_function: ContractFunction,
        sender_address: str,
        payload: dict = None,
        transaction_gas_limit: int = None,
    ) -> dict:

        #
        # Build Payload
        #

        base_payload = {
            'chainId':
            int(self.client.chain_id),
            'nonce':
            self.client.w3.eth.getTransactionCount(sender_address, 'pending'),
            'from':
            sender_address,
            'gasPrice':
            self.client.gas_price
        }
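        # NOTE: reading the nonce from the 'pending' block counts transactions
        # that are queued but not yet mined, avoiding nonce collisions when
        # sending several transactions in quick succession.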

        # Aggregate
        if not payload:
            payload = {}
        payload.update(base_payload)
        # Explicit gas override - will skip gas estimation in next operation.
        if transaction_gas_limit:
            payload['gas'] = int(transaction_gas_limit)

        #
        # Build Transaction
        #

        self.__log_transaction(transaction_dict=payload,
                               contract_function=contract_function)
        try:
            transaction_dict = contract_function.buildTransaction(
                payload)  # Gas estimation occurs here
        except (TestTransactionFailed, ValidationError, ValueError) as error:
            # Note: Geth raises ValueError in the same condition that pyevm raises ValidationError here.
            # Treat this condition as "Transaction Failed" during gas estimation.
            raise self.__transaction_failed(
                exception=error,
                transaction_dict=payload,
                contract_function=contract_function)
        return transaction_dict

    def sign_and_broadcast_transaction(self,
                                       transaction_dict,
                                       transaction_name: str = "",
                                       confirmations: int = 0) -> dict:

        #
        # Setup
        #

        # TODO # 1754 - Move this to singleton - I do not approve... nor does Bogdan?
        if GlobalLoggerSettings._json_ipc:
            emitter = JSONRPCStdoutEmitter()
        else:
            emitter = StdoutEmitter()

        if self.transacting_power is READ_ONLY_INTERFACE:
            raise self.InterfaceError(str(READ_ONLY_INTERFACE))

        #
        # Sign
        #

        # TODO: Show the USD Price:  https://api.coinmarketcap.com/v1/ticker/ethereum/
        price = transaction_dict['gasPrice']
        cost_wei = price * transaction_dict['gas']
        cost = Web3.fromWei(cost_wei, 'gwei')
        if self.transacting_power.is_device:
            emitter.message(
                f'Confirm transaction {transaction_name} on hardware wallet... ({cost} gwei @ {price})',
                color='yellow')
        signed_raw_transaction = self.transacting_power.sign_transaction(
            transaction_dict)

        #
        # Broadcast
        #

        emitter.message(
            f'Broadcasting {transaction_name} Transaction ({cost} gwei @ {price})...',
            color='yellow')
        try:
            txhash = self.client.send_raw_transaction(
                signed_raw_transaction)  # <--- BROADCAST
        except (TestTransactionFailed, ValueError) as error:
            # NOTE: __transaction_failed requires the originating contract
            # function, which is not available at broadcast time here, so log
            # and re-raise instead of calling the handler with bad arguments.
            self.log.critical(f"Transaction broadcast failed ({transaction_name}): {error}")
            raise

        #
        # Receipt
        #

        try:
            receipt = self.client.wait_for_receipt(txhash,
                                                   timeout=self.TIMEOUT)
        except TimeExhausted:
            # TODO: #1504 - Handle transaction timeout
            raise
        else:
            self.log.debug(
                f"[RECEIPT-{transaction_name}] | txhash: {receipt['transactionHash'].hex()}"
            )

        #
        # Confirmations
        #

        # Primary check
        deployment_status = receipt.get('status', UNKNOWN_TX_STATUS)
        if deployment_status == 0:
            failure = f"Transaction transmitted, but receipt returned status code 0. " \
                      f"Full receipt: \n {pprint.pformat(receipt, indent=2)}"
            raise self.InterfaceError(failure)

        if deployment_status is UNKNOWN_TX_STATUS:
            self.log.info(
                f"Unknown transaction status for {txhash} (receipt did not contain a status field)"
            )

            # Secondary check
            tx = self.client.get_transaction(txhash)
            if tx["gas"] == receipt["gasUsed"]:
                raise self.InterfaceError(
                    f"Transaction consumed 100% of transaction gas. "
                    f"Full receipt: \n {pprint.pformat(receipt, indent=2)}")

        # Block confirmations
        if confirmations:
            start = maya.now()
            confirmations_so_far = self.get_confirmations(receipt)
            while confirmations_so_far < confirmations:
                self.log.info(
                    f"So far, we've received {confirmations_so_far} confirmations. "
                    f"Waiting for {confirmations - confirmations_so_far} more."
                )
                time.sleep(3)
                confirmations_so_far = self.get_confirmations(receipt)
                if (maya.now() - start).seconds > self.TIMEOUT:
                    raise self.NotEnoughConfirmations

        return receipt

    def get_confirmations(self, receipt: dict) -> int:
        tx_block_number = receipt.get('blockNumber')
        latest_block_number = self.w3.eth.blockNumber
        confirmations = latest_block_number - tx_block_number
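        # Under this definition a transaction included in the latest block has
        # zero confirmations; a negative result means the node's head is behind
        # the receipt's block (handled below).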
        if confirmations < 0:
            raise ValueError(
                f"Can't get number of confirmations for transaction {receipt['transactionHash'].hex()}, "
                f"as it seems to come from {-confirmations} blocks in the future..."
            )
        return confirmations

    def get_blocktime(self):
        highest_block = self.w3.eth.getBlock('latest')
        now = highest_block['timestamp']
        return now

    @validate_checksum_address
    def send_transaction(self,
                         contract_function: Union[ContractFunction,
                                                  ContractConstructor],
                         sender_address: str,
                         payload: dict = None,
                         transaction_gas_limit: int = None,
                         confirmations: int = 0) -> dict:

        transaction = self.build_transaction(
            contract_function=contract_function,
            sender_address=sender_address,
            payload=payload,
            transaction_gas_limit=transaction_gas_limit)

        # Get transaction name
        try:
            transaction_name = contract_function.fn_name.upper()
        except AttributeError:
            transaction_name = 'DEPLOY' if isinstance(
                contract_function, ContractConstructor) else 'UNKNOWN'

        receipt = self.sign_and_broadcast_transaction(
            transaction_dict=transaction,
            transaction_name=transaction_name,
            confirmations=confirmations)
        return receipt

    def get_contract_by_name(
        self,
        registry: BaseContractRegistry,
        contract_name: str,
        contract_version: str = None,
        enrollment_version: Union[int, str] = None,
        proxy_name: str = None,
        use_proxy_address: bool = True
    ) -> Union[VersionedContract, List[tuple]]:
        """
        Instantiate a deployed contract from registry data,
        and assimilate it with its proxy if it is upgradeable,
        or return all registered records if use_proxy_address is False.
        """
        target_contract_records = registry.search(
            contract_name=contract_name, contract_version=contract_version)

        if not target_contract_records:
            raise self.UnknownContract(
                f"No such contract records with name {contract_name}:{contract_version}."
            )

        if proxy_name:

            # Lookup proxies; Search for a published proxy that targets this contract record
            proxy_records = registry.search(contract_name=proxy_name)

            results = list()
            for proxy_name, proxy_version, proxy_address, proxy_abi in proxy_records:
                proxy_contract = self.client.w3.eth.contract(
                    abi=proxy_abi,
                    address=proxy_address,
                    version=proxy_version,
                    ContractFactoryClass=self._contract_factory)

                # Read this dispatcher's target address from the blockchain
                proxy_live_target_address = proxy_contract.functions.target(
                ).call()
                for target_name, target_version, target_address, target_abi in target_contract_records:

                    if target_address == proxy_live_target_address:
                        if use_proxy_address:
                            triplet = (proxy_address, target_version,
                                       target_abi)
                        else:
                            triplet = (target_address, target_version,
                                       target_abi)
                    else:
                        continue

                    results.append(triplet)

            if len(results) > 1:
                address, _version, _abi = results[0]
                message = "Multiple {} deployments are targeting {}".format(
                    proxy_name, address)
                raise self.InterfaceError(message)

            else:
                try:
                    selected_address, selected_version, selected_abi = results[
                        0]
                except IndexError:
                    raise self.UnknownContract(
                        f"There are no Dispatcher records targeting '{contract_name}':{contract_version}"
                    )

        else:
            # TODO: use_proxy_address doesn't work in this case. Should we raise if used?

            # NOTE: 0 must be allowed as a valid version number
            if len(target_contract_records) != 1:
                if enrollment_version is None:
                    m = f"{len(target_contract_records)} records enrolled " \
                        f"for contract {contract_name}:{contract_version} " \
                        f"and no version index was supplied."
                    raise self.InterfaceError(m)
                enrollment_version = self.__get_enrollment_version_index(
                    name=contract_name,
                    contract_version=contract_version,
                    version_index=enrollment_version,
                    enrollments=len(target_contract_records))

            else:
                enrollment_version = -1  # default

            _contract_name, selected_version, selected_address, selected_abi = target_contract_records[
                enrollment_version]

        # Create the contract from selected sources
        unified_contract = self.client.w3.eth.contract(
            abi=selected_abi,
            address=selected_address,
            version=selected_version,
            ContractFactoryClass=self._contract_factory)

        return unified_contract

    @staticmethod
    def __get_enrollment_version_index(version_index: Union[int, str],
                                       enrollments: int, name: str,
                                       contract_version: str):
        version_names = {'latest': -1, 'earliest': 0}
        try:
            version = version_names[version_index]
        except KeyError:
            try:
                version = int(version_index)
            except ValueError:
                what_is_this = version_index
                raise ValueError(
                    f"'{what_is_this}' is not a valid enrollment version number"
                )
            else:
                if version > enrollments - 1:
                    message = f"Version index '{version}' is larger than the number of enrollments " \
                              f"for {name}:{contract_version}."
                    raise ValueError(message)
        return version
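A minimal usage sketch for the interface above. Everything concrete in it is an assumption for illustration (the IPC path, the registry object, and the account addresses), not part of the listing:

# Hypothetical driver for the BlockchainInterface listing above.
interface = BlockchainInterface(provider_uri='ipc:///tmp/geth.ipc',  # assumed local geth socket
                                gas_strategy='fast')
interface.connect()  # attaches the provider, wraps it in a client, installs middleware

registry = ...  # assumed: a BaseContractRegistry with 'NuCypherToken' enrolled
token = interface.get_contract_by_name(registry=registry,
                                       contract_name='NuCypherToken')

sender, recipient = '0x...', '0x...'  # placeholder checksum addresses
receipt = interface.send_transaction(  # requires an activated transacting power
    contract_function=token.functions.transfer(recipient, 100),
    sender_address=sender)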
Exemple #26
0
async def main(reactor, loops):
    """
    Benchmark how long it takes to send `loops` messages.
    """
    servers = []

    def protocol():
        p = LineCounter()
        servers.append(p)
        return p

    logger_factory = ServerFactory.forProtocol(protocol)
    logger_factory.wait_for = loops
    logger_factory.on_done = Deferred()
    port = reactor.listenTCP(0, logger_factory, interface="127.0.0.1")

    hs, wait, cleanup = await make_homeserver(reactor)

    errors = StringIO()
    publisher = LogPublisher()
    mock_sys = Mock()
    beginner = LogBeginner(publisher,
                           errors,
                           mock_sys,
                           warnings,
                           initialBufferSize=loops)

    log_config = {
        "loggers": {
            "synapse": {
                "level": "DEBUG"
            }
        },
        "drains": {
            "tersejson": {
                "type": "network_json_terse",
                "host": "127.0.0.1",
                "port": port.getHost().port,
                "maximum_buffer": 100,
            }
        },
    }
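    # The 'network_json_terse' drain streams terse-JSON log lines to the TCP
    # server opened above; 'maximum_buffer' caps how many events may queue in
    # the observer before the producer must back off (see the loop below).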

    logger = Logger(namespace="synapse.logging.test_terse_json",
                    observer=publisher)
    logging_system = setup_structured_logging(hs,
                                              hs.config,
                                              log_config,
                                              logBeginner=beginner,
                                              redirect_stdlib_logging=False)

    # Wait for it to connect...
    await logging_system._observers[0]._service.whenConnected()

    start = perf_counter()

    # Send a bunch of useful messages
    for i in range(0, loops):
        logger.info("test message %s" % (i, ))

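        # Back off whenever the observer's buffer fills, resuming once it has
        # drained to half capacity, so the benchmark measures steady-state
        # throughput rather than unbounded queueing.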
        if (len(logging_system._observers[0]._buffer) ==
                logging_system._observers[0].maximum_buffer):
            while (len(logging_system._observers[0]._buffer) >
                   logging_system._observers[0].maximum_buffer / 2):
                await wait(0.01)

    await logger_factory.on_done

    end = perf_counter() - start

    logging_system.stop()
    port.stopListening()
    cleanup()

    return end
Exemple #27
0
class TesterBlockchain(Blockchain):
    """
    Blockchain subclass with additional test utility methods and options.
    """

    _instance = NO_BLOCKCHAIN_AVAILABLE
    _test_account_cache = list()

    def __init__(self,
                 test_accounts=None,
                 poa=True,
                 airdrop=True,
                 *args,
                 **kwargs):
        super().__init__(*args, **kwargs)

        self.log = Logger("test-blockchain")  # type: Logger

        # For use with Proof-Of-Authority test-blockchains
        if poa is True:
            w3 = self.interface.w3
            w3.middleware_stack.inject(geth_poa_middleware, layer=0)

        # Generate additional ethereum accounts for testing
        if test_accounts is not None:
            enough_accounts = len(
                self.interface.w3.eth.accounts) >= test_accounts
            if not enough_accounts:
                accounts_to_make = test_accounts - len(
                    self.interface.w3.eth.accounts)

                self.__generate_insecure_unlocked_accounts(
                    quantity=accounts_to_make)

                assert test_accounts == len(self.interface.w3.eth.accounts)

        if airdrop is True:  # ETH for everyone!
            self.ether_airdrop(amount=DEVELOPMENT_ETH_AIRDROP_AMOUNT)

    @classmethod
    def sever_connection(cls) -> None:
        cls._instance = NO_BLOCKCHAIN_AVAILABLE

    def __generate_insecure_unlocked_accounts(self,
                                              quantity: int) -> List[str]:
        """
        Generate additional unlocked accounts transferring a balance to each account on creation.
        """
        addresses = list()
        insecure_passphrase = TEST_URSULA_INSECURE_DEVELOPMENT_PASSWORD
        for _ in range(quantity):

            umbral_priv_key = UmbralPrivateKey.gen_key()
            address = self.interface.w3.personal.importRawKey(
                private_key=umbral_priv_key.to_bytes(),
                passphrase=insecure_passphrase)

            assert self.interface.unlock_account(
                address, password=insecure_passphrase,
                duration=None), 'Failed to unlock {}'.format(address)
            addresses.append(address)
            self._test_account_cache.append(address)
            self.log.info('Generated new insecure account {}'.format(address))

        return addresses

    def ether_airdrop(self, amount: int) -> List[str]:
        """Airdrops ether from creator address to all other addresses!"""

        coinbase, *addresses = self.interface.w3.eth.accounts

        tx_hashes = list()
        for address in addresses:

            tx = {'to': address, 'from': coinbase, 'value': amount}
            txhash = self.interface.w3.eth.sendTransaction(tx)

            _receipt = self.wait_for_receipt(txhash)
            tx_hashes.append(txhash)
            self.log.info("Airdropped {} ETH {} -> {}".format(
                amount, tx['from'], tx['to']))

        return tx_hashes

    def time_travel(self,
                    hours: int = None,
                    seconds: int = None,
                    periods: int = None):
        """
        Wait the specified number of wait_hours by comparing
        block timestamps and mines a single block.
        """

        more_than_one_arg = sum(map(bool, (hours, seconds, periods))) > 1
        if more_than_one_arg:
            raise ValueError(
                "Specify hours, seconds, or periods, not a combination")

        if periods:
            duration = (constants.HOURS_PER_PERIOD * periods) * (60 * 60)
            base = constants.HOURS_PER_PERIOD * 60 * 60
        elif hours:
            duration = hours * (60 * 60)
            base = 60 * 60
        elif seconds:
            duration = seconds
            base = 1
        else:
            raise ValueError("Specify either hours, seconds, or lock_periods.")

        now = self.interface.w3.eth.getBlock(
            block_identifier='latest').timestamp
        end_timestamp = ((now + duration) // base) * base
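        # Floor-divide so the new timestamp lands exactly on a whole hour /
        # period / second boundary rather than at a ragged offset.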

        self.interface.w3.eth.web3.testing.timeTravel(timestamp=end_timestamp)
        self.interface.w3.eth.web3.testing.mine(1)
        self.log.info("Time traveled to {}".format(end_timestamp))
Exemple #28
0
class LocalFileBasedNodeStorage(NodeStorage):

    _name = 'local'
    __FILENAME_TEMPLATE = '{}.node'
    __DEFAULT_DIR = os.path.join(DEFAULT_CONFIG_ROOT, 'known_nodes', 'metadata')

    class NoNodeMetadataFileFound(FileNotFoundError, NodeStorage.UnknownNode):
        pass

    def __init__(self,
                 known_metadata_dir: str = __DEFAULT_DIR,
                 *args, **kwargs
                 ) -> None:

        super().__init__(*args, **kwargs)
        self.log = Logger(self.__class__.__name__)
        self.known_metadata_dir = known_metadata_dir

    def __generate_filepath(self, checksum_address: str) -> str:
        metadata_path = os.path.join(self.known_metadata_dir, self.__FILENAME_TEMPLATE.format(checksum_address))
        return metadata_path

    def __read(self, filepath: str, federated_only: bool):
        from nucypher.characters.lawful import Ursula
        try:
            with open(filepath, "rb") as seed_file:
                seed_file.seek(0)
                node_bytes = self.deserializer(seed_file.read())
                node = Ursula.from_bytes(node_bytes, federated_only=federated_only)
        except FileNotFoundError:
            raise self.UnknownNode
        return node

    def __write(self, filepath: str, node):
        with open(filepath, "wb") as f:
            f.write(self.serializer(self.character_class.__bytes__(node)))
        self.log.info("Wrote new node metadata to filesystem {}".format(filepath))
        return filepath

    def all(self, federated_only: bool) -> set:
        filenames = os.listdir(self.known_metadata_dir)
        self.log.info("Found {} known node metadata files at {}".format(len(filenames), self.known_metadata_dir))
        known_nodes = set()
        for filename in filenames:
            metadata_path = os.path.join(self.known_metadata_dir, filename)
            node = self.__read(filepath=metadata_path, federated_only=federated_only)   # TODO: 466
            known_nodes.add(node)
        return known_nodes

    def get(self, checksum_address: str, federated_only: bool):
        metadata_path = self.__generate_filepath(checksum_address=checksum_address)
        node = self.__read(filepath=metadata_path, federated_only=federated_only)   # TODO: 466
        return node

    def save(self, node):
        try:
            filepath = self.__generate_filepath(checksum_address=node.checksum_public_address)
        except AttributeError:
            raise AttributeError("{} does not have a checksum_public_address; cannot derive a metadata filepath".format(node))
        self.__write(filepath=filepath, node=node)

    def remove(self, checksum_address: str):
        filepath = self.__generate_filepath(checksum_address=checksum_address)
        os.remove(filepath)
        self.log.debug("Deleted {} from the filesystem".format(checksum_address))

    def clear(self):
        self.__known_nodes = dict()

    def payload(self) -> dict:
        payload = {
            'storage_type': self._name,
            'known_metadata_dir': self.known_metadata_dir
        }
        return payload

    @classmethod
    def from_payload(cls, payload: dict, *args, **kwargs) -> 'LocalFileBasedNodeStorage':
        storage_type = payload[cls._TYPE_LABEL]
        if not storage_type == cls._name:
            raise cls.NodeStorageError("Wrong storage type: got {}".format(storage_type))
        return cls(known_metadata_dir=payload['known_metadata_dir'], *args, **kwargs)

    def initialize(self):
        try:
            os.mkdir(self.known_metadata_dir, mode=0o755)  # known_metadata
        except FileExistsError:
            message = "There are pre-existing metadata files at {}".format(self.known_metadata_dir)
            raise self.NodeStorageError(message)
        except FileNotFoundError:
            raise self.NodeStorageError("There is no existing configuration at {}".format(self.known_metadata_dir))
Exemple #29
0
class BlockchainInterface:
    """
    Interacts with a solidity compiler and a registry in order to instantiate compiled
    ethereum contracts with the given web3 provider backend.
    """

    TIMEOUT = 180  # seconds
    NULL_ADDRESS = '0x' + '0' * 40

    _instance = NO_BLOCKCHAIN_CONNECTION.bool_value(False)
    process = NO_PROVIDER_PROCESS.bool_value(False)
    Web3 = Web3

    _contract_factory = Contract

    class InterfaceError(Exception):
        pass

    class NoProvider(InterfaceError):
        pass

    class ConnectionFailed(InterfaceError):
        pass

    class UnknownContract(InterfaceError):
        pass

    def __init__(self,
                 poa: bool = True,
                 provider_process: NuCypherGethProcess = NO_PROVIDER_PROCESS,
                 provider_uri: str = NO_BLOCKCHAIN_CONNECTION,
                 transacting_power: TransactingPower = READ_ONLY_INTERFACE,
                 provider: Web3Providers = NO_BLOCKCHAIN_CONNECTION,
                 registry: EthereumContractRegistry = None):
        """
        A blockchain "network interface"; The circumflex wraps entirely around the bounds of
        contract operations including compilation, deployment, and execution.

         Filesystem          Configuration           Node              Client                  EVM
        ================ ====================== =============== =====================  ===========================

         Solidity Files -- SolidityCompiler ---                  --- HTTPProvider ------ ...
                                               |                |
                                               |                |
                                                *Blockchain* ----- IPCProvider ----- External EVM (geth, parity...)
                                               |                |
                                               |                |
         Registry File -- ContractRegistry ---                   --- TestProvider ----- EthereumTester
                                                                                              |
                                                                                              |
                                                                                      PyEVM (Development Chain)

         Runtime Files --                 -------- Blockchain
                        |                |             |
                        |                |             |
         Key Files ------ CharacterConfiguration --- Agent                          ... (Contract API)
                        |                |             |
                        |                |             |
                        |                 --------- Actor                           ... (Blockchain-Character API)
                        |                              |
                        |                              |
         Config File ---                           Character                        ... (Public API)
                                                       |
                                                       |
                                                     Human


        The Blockchain is the junction of the solidity compiler, a contract registry, and a collection of
        web3 network providers as a means of interfacing with the ethereum blockchain to execute
        or deploy contract code on the network.


        Compiler and Registry Usage
        -----------------------------

        Contracts are freshly re-compiled if an instance of SolidityCompiler is passed; otherwise,
        the registry reads contract data saved to disk, which is used to retrieve contract
        addresses and op-codes. Optionally, a registry instance can be passed instead.


        Provider Usage
        ---------------
        https://github.com/ethereum/eth-tester#available-backends


        * HTTP Provider - Web3 HTTP provider, typically JSON RPC 2.0 over HTTP
        * Websocket Provider - Web3 WS provider, typically JSON RPC 2.0 over WS, supply endpoint uri and websocket=True
        * IPC Provider - Web3 File based IPC provider transported over standard I/O
        * Custom Provider - A pre-initialized web3.py provider instance to attach to this interface

        """

        self.log = Logger('Blockchain')
        self.poa = poa
        self.provider_uri = provider_uri
        self._provider = provider
        self._provider_process = provider_process
        self.client = NO_BLOCKCHAIN_CONNECTION
        self.transacting_power = transacting_power
        self.registry = registry
        BlockchainInterface._instance = self

    def __repr__(self):
        r = '{name}({uri})'.format(name=self.__class__.__name__,
                                   uri=self.provider_uri)
        return r

    @classmethod
    def from_dict(cls, payload: dict, **overrides) -> 'BlockchainInterface':

        # Apply overrides
        payload.update({k: v for k, v in overrides.items() if v is not None})

        registry = EthereumContractRegistry(
            registry_filepath=payload['registry_filepath'])
        blockchain = cls(provider_uri=payload['provider_uri'],
                         registry=registry)
        return blockchain

    def to_dict(self) -> dict:
        payload = dict(provider_uri=self.provider_uri,
                       poa=self.poa,
                       registry_filepath=self.registry.filepath)
        return payload

    def _configure_registry(self, fetch_registry: bool = True) -> None:
        RegistryClass = EthereumContractRegistry._get_registry_class(
            local=self.client.is_local)
        if fetch_registry:
            registry = RegistryClass.from_latest_publication()
        else:
            registry = RegistryClass()
        self.registry = registry
        self.log.info("Using contract registry {}".format(
            self.registry.filepath))

    @property
    def is_connected(self) -> bool:
        """
        https://web3py.readthedocs.io/en/stable/__provider.html#examples-using-automated-detection
        """
        if self.client is NO_BLOCKCHAIN_CONNECTION:
            return False
        return self.client.is_connected

    def disconnect(self) -> None:
        if self._provider_process:
            self._provider_process.stop()
        self._provider_process = NO_PROVIDER_PROCESS
        self._provider = NO_BLOCKCHAIN_CONNECTION
        BlockchainInterface._instance = NO_BLOCKCHAIN_CONNECTION

    @classmethod
    def reconnect(cls, *args, **kwargs) -> 'BlockchainInterface':
        return cls._instance

    def attach_middleware(self):

        # For use with Proof-Of-Authority test-blockchains
        if self.poa is True:
            self.log.debug('Injecting POA middleware at layer 0')
            self.client.inject_middleware(geth_poa_middleware, layer=0)

    def connect(self, fetch_registry: bool = True, sync_now: bool = False):

        # Spawn child process
        if self._provider_process:
            self._provider_process.start()
            provider_uri = self._provider_process.provider_uri(scheme='file')
        else:
            provider_uri = self.provider_uri
            self.log.info(
                f"Using external Web3 Provider '{self.provider_uri}'")

        # Attach Provider
        self._attach_provider(provider=self._provider,
                              provider_uri=provider_uri)
        self.log.info("Connecting to {}".format(self.provider_uri))
        if self._provider is NO_BLOCKCHAIN_CONNECTION:
            raise self.NoProvider(
                "There are no configured blockchain providers")

        # Connect Web3 Instance
        try:
            self.w3 = self.Web3(provider=self._provider)
            self.client = Web3Client.from_w3(w3=self.w3)
        except requests.ConnectionError:  # RPC
            raise self.ConnectionFailed(
                f'Connection Failed - {str(self.provider_uri)} - is RPC enabled?'
            )
        except FileNotFoundError:  # IPC File Protocol
            raise self.ConnectionFailed(
                f'Connection Failed - {str(self.provider_uri)} - is IPC enabled?'
            )
        else:
            self.attach_middleware()

        # Wait for chaindata sync
        if sync_now:
            self.client.sync()

        # Establish contact with NuCypher contracts
        if not self.registry:
            self._configure_registry(fetch_registry=fetch_registry)

        return self.is_connected

    @property
    def provider(self) -> Union[IPCProvider, WebsocketProvider, HTTPProvider]:
        return self._provider

    def _attach_provider(self,
                         provider: Web3Providers = None,
                         provider_uri: str = None) -> None:
        """
        https://web3py.readthedocs.io/en/latest/providers.html#providers
        """

        if not provider_uri and not provider:
            raise self.NoProvider("No URI or provider instances supplied.")

        if provider_uri and not provider:
            uri_breakdown = urlparse(provider_uri)

            if uri_breakdown.scheme == 'tester':
                providers = {
                    'pyevm': _get_tester_pyevm,
                    'geth': _get_test_geth_parity_provider,
                    'parity-ethereum': _get_test_geth_parity_provider,
                }
                provider_scheme = uri_breakdown.netloc
            else:
                providers = {
                    'auto': _get_auto_provider,
                    'infura': _get_infura_provider,
                    'ipc': _get_IPC_provider,
                    'file': _get_IPC_provider,
                    'ws': _get_websocket_provider,
                    'http': _get_HTTP_provider,
                    'https': _get_HTTP_provider,
                }
                provider_scheme = uri_breakdown.scheme
            try:
                self._provider = providers[provider_scheme](provider_uri)
            except KeyError:
                raise ValueError(
                    f"{provider_uri} is an invalid or unsupported blockchain provider URI"
                )
            else:
                self.provider_uri = provider_uri or NO_BLOCKCHAIN_CONNECTION
        else:
            self._provider = provider

    def send_transaction(
        self,
        contract_function: ContractFunction,
        sender_address: str,
        payload: dict = None,
    ) -> dict:

        if self.transacting_power is READ_ONLY_INTERFACE:
            raise self.InterfaceError(str(READ_ONLY_INTERFACE))

        #
        # Build
        #

        if not payload:
            payload = {}

        nonce = self.client.w3.eth.getTransactionCount(sender_address)
        payload.update({
            'chainId': int(self.client.net_version),
            'nonce': nonce,
            'from': sender_address,
            'gasPrice': self.client.gas_price,
            # 'gas': 0,  # TODO: Gas Management
        })

        # Get interface name
        deployment = isinstance(contract_function, ContractConstructor)

        try:
            transaction_name = contract_function.fn_name.upper()
        except AttributeError:
            if deployment:
                transaction_name = 'DEPLOY'
            else:
                transaction_name = 'UNKNOWN'

        payload_pprint = dict(payload)
        payload_pprint['from'] = to_checksum_address(payload['from'])
        payload_pprint = ', '.join("{}: {}".format(k, v)
                                   for k, v in payload_pprint.items())
        self.log.debug(f"[TX-{transaction_name}] | {payload_pprint}")

        # Build transaction payload
        try:
            unsigned_transaction = contract_function.buildTransaction(payload)
        except ValidationError as e:
            # TODO: Handle validation failures for gas limits, invalid fields, etc.
            self.log.warn(f"Validation error: {e}")
            raise
        else:
            if deployment:
                self.log.info(
                    f"Deploying contract: {len(unsigned_transaction['data'])} bytes"
                )

        #
        # Broadcast
        #

        signed_raw_transaction = self.transacting_power.sign_transaction(
            unsigned_transaction)
        txhash = self.client.send_raw_transaction(signed_raw_transaction)

        try:
            receipt = self.client.wait_for_receipt(txhash,
                                                   timeout=self.TIMEOUT)
        except TimeExhausted:
            # TODO: Handle transaction timeout
            raise
        else:
            self.log.debug(
                f"[RECEIPT-{transaction_name}] | txhash: {receipt['transactionHash'].hex()}"
            )

        #
        # Confirm
        #

        # Primary check
        deployment_status = receipt.get('status', UNKNOWN_TX_STATUS)
        if deployment_status == 0:
            failure = f"Transaction transmitted, but receipt returned status code 0. " \
                      f"Full receipt: \n {pprint.pformat(receipt, indent=2)}"
            raise self.InterfaceError(failure)

        if deployment_status is UNKNOWN_TX_STATUS:
            self.log.info(
                f"Unknown transaction status for {txhash} (receipt did not contain a status field)"
            )

            # Secondary check TODO: Is this a sensible check?
            tx = self.client.get_transaction(txhash)
            if tx["gas"] == receipt["gasUsed"]:
                raise self.InterfaceError(
                    f"Transaction consumed 100% of transaction gas. "
                    f"Full receipt: \n {pprint.pformat(receipt, indent=2)}")

        return receipt

    def get_contract_by_name(
            self,
            name: str,
            proxy_name: str = None,
            use_proxy_address: bool = True) -> Union[Contract, List[tuple]]:
        """
        Instantiate a deployed contract from registry data,
        and assimilate it with its proxy if it is upgradeable,
        or return all registered records if use_proxy_address is False.
        """
        target_contract_records = self.registry.search(contract_name=name)

        if not target_contract_records:
            raise self.UnknownContract(
                f"No such contract records with name {name}.")

        if proxy_name:  # It's upgradeable
            # Lookup proxies; Search for a published proxy that targets this contract record

            proxy_records = self.registry.search(contract_name=proxy_name)

            results = list()
            for proxy_name, proxy_addr, proxy_abi in proxy_records:
                proxy_contract = self.client.w3.eth.contract(
                    abi=proxy_abi,
                    address=proxy_addr,
                    ContractFactoryClass=self._contract_factory)

                # Read this dispatcher's target address from the blockchain
                proxy_live_target_address = proxy_contract.functions.target(
                ).call()
                for target_name, target_addr, target_abi in target_contract_records:

                    if target_addr == proxy_live_target_address:
                        if use_proxy_address:
                            pair = (proxy_addr, target_abi)
                        else:
                            pair = (proxy_live_target_address, target_abi)
                    else:
                        continue

                    results.append(pair)

            if len(results) > 1:
                address, abi = results[0]
                message = "Multiple {} deployments are targeting {}".format(
                    proxy_name, address)
                raise self.InterfaceError(message)

            else:
                selected_address, selected_abi = results[0]

        else:  # It's not upgradeable
            if len(target_contract_records) != 1:
                m = "Multiple records registered for non-upgradeable contract {}"
                raise self.InterfaceError(m.format(name))
            _target_contract_name, selected_address, selected_abi = target_contract_records[
                0]

        # Create the contract from selected sources
        unified_contract = self.client.w3.eth.contract(
            abi=selected_abi,
            address=selected_address,
            ContractFactoryClass=self._contract_factory)

        return unified_contract
Exemple #30
0
class ContractAdministrator(NucypherTokenActor):
    """
    The administrator of network contracts.
    """

    __interface_class = BlockchainDeployerInterface

    #
    # Deployer classes sorted by deployment dependency order.
    #

    standard_deployer_classes = (
        NucypherTokenDeployer,
    )

    dispatched_upgradeable_deployer_classes = (
        StakingEscrowDeployer,
        PolicyManagerDeployer,
        AdjudicatorDeployer,
    )

    upgradeable_deployer_classes = (
        *dispatched_upgradeable_deployer_classes,
        StakingInterfaceDeployer,
    )

    ownable_deployer_classes = (*dispatched_upgradeable_deployer_classes, )

    deployer_classes = (*standard_deployer_classes,
                        *upgradeable_deployer_classes)

    class UnknownContract(ValueError):
        pass

    def __init__(self,
                 registry: BaseContractRegistry,
                 deployer_address: str = None,
                 client_password: str = None,
                 economics: TokenEconomics = None):
        """
        Note: super() is not called here to avoid setting the token agent.
        TODO: Review this logic ^^ "bare mode".
        """
        self.log = Logger("Deployment-Actor")

        self.deployer_address = deployer_address
        self.checksum_address = self.deployer_address
        self.economics = economics or StandardTokenEconomics()

        self.registry = registry
        self.preallocation_escrow_deployers = dict()
        self.deployers = {d.contract_name: d for d in self.deployer_classes}

        self.transacting_power = TransactingPower(password=client_password, account=deployer_address, cache=True)
        self.transacting_power.activate()

    def __repr__(self):
        r = '{name} - {deployer_address})'.format(name=self.__class__.__name__, deployer_address=self.deployer_address)
        return r

    def __get_deployer(self, contract_name: str):
        try:
            Deployer = self.deployers[contract_name]
        except KeyError:
            raise self.UnknownContract(contract_name)
        return Deployer

    @staticmethod
    def collect_deployment_secret(deployer) -> str:
        secret = click.prompt(f'Enter {deployer.contract_name} Deployment Secret',
                              hide_input=True,
                              confirmation_prompt=True)
        return secret

    def collect_deployment_secrets(self) -> dict:
        secrets = dict()
        for deployer in self.upgradeable_deployer_classes:
            secrets[deployer.contract_name] = self.collect_deployment_secret(deployer)
        return secrets

    def deploy_contract(self,
                        contract_name: str,
                        gas_limit: int = None,
                        plaintext_secret: str = None,
                        bare: bool = False,
                        progress=None,
                        *args, **kwargs,
                        ) -> Tuple[dict, BaseContractDeployer]:

        Deployer = self.__get_deployer(contract_name=contract_name)
        deployer = Deployer(registry=self.registry,
                            deployer_address=self.deployer_address,
                            economics=self.economics,
                            *args, **kwargs)

        self.transacting_power.activate()  # Activate the TransactingPower in case too much time has passed
        if Deployer._upgradeable:
            is_initial_deployment = not bare
            if is_initial_deployment and not plaintext_secret:
                raise ValueError("An upgrade secret must be passed to perform initial deployment of a Dispatcher.")
            secret_hash = None
            if plaintext_secret:
                secret_hash = keccak(bytes(plaintext_secret, encoding='utf-8'))
            receipts = deployer.deploy(secret_hash=secret_hash,
                                       gas_limit=gas_limit,
                                       initial_deployment=is_initial_deployment,
                                       progress=progress)
        else:
            receipts = deployer.deploy(gas_limit=gas_limit, progress=progress)
        return receipts, deployer

    def upgrade_contract(self, contract_name: str, existing_plaintext_secret: str, new_plaintext_secret: str) -> dict:
        Deployer = self.__get_deployer(contract_name=contract_name)
        deployer = Deployer(registry=self.registry, deployer_address=self.deployer_address)
        new_secret_hash = keccak(bytes(new_plaintext_secret, encoding='utf-8'))
        receipts = deployer.upgrade(existing_secret_plaintext=bytes(existing_plaintext_secret, encoding='utf-8'),
                                    new_secret_hash=new_secret_hash)
        return receipts

    def retarget_proxy(self, contract_name: str, target_address: str, existing_plaintext_secret: str, new_plaintext_secret: str):
        Deployer = self.__get_deployer(contract_name=contract_name)
        deployer = Deployer(registry=self.registry, deployer_address=self.deployer_address)
        new_secret_hash = keccak(bytes(new_plaintext_secret, encoding='utf-8'))
        receipts = deployer.retarget(target_address=target_address,
                                     existing_secret_plaintext=bytes(existing_plaintext_secret, encoding='utf-8'),
                                     new_secret_hash=new_secret_hash)
        return receipts

    def rollback_contract(self, contract_name: str, existing_plaintext_secret: str, new_plaintext_secret: str):
        Deployer = self.__get_deployer(contract_name=contract_name)
        deployer = Deployer(registry=self.registry, deployer_address=self.deployer_address)
        new_secret_hash = keccak(bytes(new_plaintext_secret, encoding='utf-8'))
        receipts = deployer.rollback(existing_secret_plaintext=bytes(existing_plaintext_secret, encoding='utf-8'),
                                     new_secret_hash=new_secret_hash)
        return receipts

    def deploy_preallocation_escrow(self, allocation_registry: AllocationRegistry, progress=None) -> PreallocationEscrowDeployer:
        preallocation_escrow_deployer = PreallocationEscrowDeployer(registry=self.registry,
                                                                    deployer_address=self.deployer_address,
                                                                    allocation_registry=allocation_registry)
        preallocation_escrow_deployer.deploy(progress=progress)
        principal_address = preallocation_escrow_deployer.contract.address
        self.preallocation_escrow_deployers[principal_address] = preallocation_escrow_deployer
        return preallocation_escrow_deployer

    def deploy_network_contracts(self,
                                 secrets: dict,
                                 interactive: bool = True,
                                 emitter: StdoutEmitter = None,
                                 etherscan: bool = False) -> dict:
        """

        :param secrets: Contract upgrade secrets dictionary
        :param interactive: If True, wait for keypress after each contract deployment
        :param emitter: A console output emitter instance. If emitter is None, no output will be echoed to the console.
        :param etherscan: Open deployed contracts in Etherscan
        :return: Returns a dictionary of deployment receipts keyed by contract name
        """

        if interactive and not emitter:
            raise ValueError("'emitter' is a required keyword argument when interactive is True.")

        deployment_receipts = dict()
        gas_limit = None  # TODO: Gas management

        # deploy contracts
        total_deployment_transactions = 0
        for deployer_class in self.deployer_classes:
            total_deployment_transactions += len(deployer_class.deployment_steps)

        first_iteration = True
        with click.progressbar(length=total_deployment_transactions,
                               label="Deployment progress",
                               show_eta=False) as bar:
            bar.short_limit = 0
            for deployer_class in self.deployer_classes:
                if interactive and not first_iteration:
                    click.pause(info=f"\nPress any key to continue with deployment of {deployer_class.contract_name}")

                if emitter:
                    emitter.echo(f"\nDeploying {deployer_class.contract_name} ...")
                    bar._last_line = None
                    bar.render_progress()

                if deployer_class in self.standard_deployer_classes:
                    receipts, deployer = self.deploy_contract(contract_name=deployer_class.contract_name,
                                                              gas_limit=gas_limit,
                                                              progress=bar)
                else:
                    receipts, deployer = self.deploy_contract(contract_name=deployer_class.contract_name,
                                                              plaintext_secret=secrets[deployer_class.contract_name],
                                                              gas_limit=gas_limit,
                                                              progress=bar)

                if emitter:
                    blockchain = BlockchainInterfaceFactory.get_interface()
                    paint_contract_deployment(contract_name=deployer_class.contract_name,
                                              receipts=receipts,
                                              contract_address=deployer.contract_address,
                                              emitter=emitter,
                                              chain_name=blockchain.client.chain_name,
                                              open_in_browser=etherscan)

                deployment_receipts[deployer_class.contract_name] = receipts
                first_iteration = False

        return deployment_receipts

    def relinquish_ownership(self,
                             new_owner: str,
                             emitter: StdoutEmitter = None,
                             interactive: bool = True,
                             transaction_gas_limit: int = None) -> dict:

        if not is_checksum_address(new_owner):
            raise ValueError(f"{new_owner} is an invalid EIP-55 checksum address.")

        receipts = dict()

        for contract_deployer in self.ownable_deployer_classes:
            deployer = contract_deployer(registry=self.registry, deployer_address=self.deployer_address)
            receipt = deployer.transfer_ownership(new_owner=new_owner, transaction_gas_limit=transaction_gas_limit)

            if emitter:
                emitter.echo(f"Transferred ownership of {deployer.contract_name} to {new_owner}")

            if interactive:
                click.pause(info="Press any key to continue")

            receipts[contract_deployer.contract_name] = receipt

        return receipts

    def deploy_beneficiary_contracts(self,
                                     allocations: List[Dict[str, Union[str, int]]],
                                     allocation_outfile: str = None,
                                     allocation_registry: AllocationRegistry = None,
                                     crash_on_failure: bool = True,
                                     interactive: bool = True,
                                     emitter: StdoutEmitter = None,
                                     ) -> Dict[str, dict]:
        """
        The allocation file contains a list of allocations, each of them composed of:
          * 'beneficiary_address': Checksum address of the beneficiary
          * 'name': User-friendly name of the beneficiary (Optional)
          * 'amount': Amount of tokens locked, in NuNits
          * 'duration_seconds': Lock duration expressed in seconds

        It accepts both CSV and JSON formats. Example allocation file in CSV format:

        "beneficiary_address","name","amount","duration_seconds"
        "0xdeadbeef","H. E. Pennypacker",100,31536000
        "0xabced120","",133432,31536000
        "0xf7aefec2","",999,31536000

        Example allocation file in JSON format:

        [ {'beneficiary_address': '0xdeadbeef', 'name': 'H. E. Pennypacker', 'amount': 100, 'duration_seconds': 31536000},
          {'beneficiary_address': '0xabced120', 'amount': 133432, 'duration_seconds': 31536000},
          {'beneficiary_address': '0xf7aefec2', 'amount': 999, 'duration_seconds': 31536000}]

        """

        if interactive and not emitter:
            raise ValueError("'emitter' is a required keyword argument when interactive is True.")

        if allocation_registry and allocation_outfile:
            raise self.ActorError("Pass either allocation registry or allocation_outfile, not both.")
        if allocation_registry is None:
            allocation_registry = AllocationRegistry(filepath=allocation_outfile)

        if emitter:
            paint_input_allocation_file(emitter, allocations)

        if interactive:
            click.confirm("Continue with the allocation process?", abort=True)

        total_to_allocate = NU.from_nunits(sum(allocation['amount'] for allocation in allocations))
        balance = ContractAgency.get_agent(NucypherTokenAgent, self.registry).get_balance(self.deployer_address)
        if balance < total_to_allocate:
            raise ValueError(f"Not enough tokens to allocate. We need at least {total_to_allocate}.")

        allocation_receipts, failed, allocated = dict(), list(), list()
        total_deployment_transactions = len(allocations) * 4

        # Create an allocation template file, containing the allocation contract ABI and placeholder values
        # for the beneficiary and contract addresses. This file will be shared with all allocation users.
        empty_allocation_escrow_deployer = PreallocationEscrowDeployer(registry=self.registry)
        allocation_contract_abi = empty_allocation_escrow_deployer.get_contract_abi()
        allocation_template = {
            "BENEFICIARY_ADDRESS": ["ALLOCATION_CONTRACT_ADDRESS", allocation_contract_abi]
        }

        parent_path = Path(allocation_registry.filepath).parent  # Use same folder as allocation registry
        template_filename = IndividualAllocationRegistry.REGISTRY_NAME
        template_filepath = os.path.join(parent_path, template_filename)
        AllocationRegistry(filepath=template_filepath).write(registry_data=allocation_template)
        if emitter:
            emitter.echo(f"Saved allocation template file to {template_filepath}", color='blue', bold=True)

        already_enrolled = [a['beneficiary_address'] for a in allocations
                            if allocation_registry.is_beneficiary_enrolled(a['beneficiary_address'])]
        if already_enrolled:
            raise ValueError(f"The following beneficiaries are already enrolled in allocation registry "
                             f"({allocation_registry.filepath}): {already_enrolled}")

        # Deploy each allocation contract
        with click.progressbar(length=total_deployment_transactions,
                               label="Allocation progress",
                               show_eta=False) as bar:
            bar.short_limit = 0
            for allocation in allocations:

                # TODO: Check if allocation already exists in allocation registry

                beneficiary = allocation['beneficiary_address']
                name = allocation.get('name', 'No name provided')

                if interactive:
                    click.pause(info=f"\nPress any key to continue with allocation for "
                                     f"beneficiary {beneficiary} ({name})")

                if emitter:
                    emitter.echo(f"\nDeploying PreallocationEscrow contract for beneficiary {beneficiary} ({name})...")
                    bar._last_line = None
                    bar.render_progress()

                amount = allocation['amount']
                duration = allocation['duration_seconds']

                try:
                    self.transacting_power.activate()  # Activate the TransactingPower in case too much time has passed

                    deployer = self.deploy_preallocation_escrow(allocation_registry=allocation_registry,
                                                                progress=bar)

                    deployer.deliver(value=amount,
                                     duration=duration,
                                     beneficiary_address=beneficiary,
                                     progress=bar)
                except TransactionFailed as e:
                    if crash_on_failure:
                        raise
                    message = f"Failed allocation transaction for {NU.from_nunits(amount)} to {beneficiary}: {e}"
                    self.log.debug(message)
                    if emitter:
                        emitter.echo(message=message, color='red', bold=True)
                    failed.append(allocation)
                    continue

                else:
                    allocation_receipts[beneficiary] = deployer.deployment_receipts
                    allocation_contract_address = deployer.contract_address
                    self.log.info(f"Created {deployer.contract_name} contract at {allocation_contract_address} "
                                  f"for beneficiary {beneficiary}.")
                    allocated.append((allocation, allocation_contract_address))

                    # Create individual allocation file
                    individual_allocation_filename = f'allocation-{beneficiary}.json'
                    individual_allocation_filepath = os.path.join(parent_path, individual_allocation_filename)
                    individual_allocation_file_data = {
                        'beneficiary_address': beneficiary,
                        'contract_address': allocation_contract_address
                    }
                    with open(individual_allocation_filepath, 'w') as outfile:
                        json.dump(individual_allocation_file_data, outfile)

                    if emitter:
                        blockchain = BlockchainInterfaceFactory.get_interface()
                        paint_contract_deployment(contract_name=deployer.contract_name,
                                                  receipts=deployer.deployment_receipts,
                                                  contract_address=deployer.contract_address,
                                                  emitter=emitter,
                                                  chain_name=blockchain.client.chain_name,
                                                  open_in_browser=False)
                        emitter.echo(f"Saved individual allocation file to {individual_allocation_filepath}",
                                     color='blue', bold=True)

            if emitter:
                paint_deployed_allocations(emitter, allocated, failed)

            csv_filename = f'allocation-summary-{self.deployer_address[:6]}-{maya.now().epoch}.csv'
            csv_filepath = os.path.join(parent_path, csv_filename)
            write_deployed_allocations_to_csv(csv_filepath, allocated, failed)
            if emitter:
                emitter.echo(f"Saved allocation summary CSV to {csv_filepath}", color='blue', bold=True)

            if failed:
                self.log.critical(f"FAILED TOKEN ALLOCATION - {len(failed)} allocations failed.")

        return allocation_receipts

    @staticmethod
    def __read_allocation_data(filepath: str) -> list:
        with open(filepath, 'r') as allocation_file:
            if filepath.endswith(".csv"):
                reader = csv.DictReader(allocation_file)
                allocation_data = list(reader)
            else:  # Assume it's JSON by default
                allocation_data = json.load(allocation_file)

        # Pre-process allocation data
        for entry in allocation_data:
            entry['beneficiary_address'] = to_checksum_address(entry['beneficiary_address'])
            entry['amount'] = int(entry['amount'])
            entry['duration_seconds'] = int(entry['duration_seconds'])

        return allocation_data

    def deploy_beneficiaries_from_file(self,
                                       allocation_data_filepath: str,
                                       allocation_outfile: str = None,
                                       emitter=None,
                                       interactive=None) -> dict:

        allocations = self.__read_allocation_data(filepath=allocation_data_filepath)
        receipts = self.deploy_beneficiary_contracts(allocations=allocations,
                                                     allocation_outfile=allocation_outfile,
                                                     emitter=emitter,
                                                     interactive=interactive,
                                                     crash_on_failure=False)
        # Save transaction metadata
        receipts_filepath = self.save_deployment_receipts(receipts=receipts, filename_prefix='allocation')
        if emitter:
            emitter.echo(f"Saved allocation receipts to {receipts_filepath}", color='blue', bold=True)
        return receipts

    def save_deployment_receipts(self, receipts: dict, filename_prefix: str = 'deployment') -> str:
        filename = f'{filename_prefix}-receipts-{self.deployer_address[:6]}-{maya.now().epoch}.json'
        filepath = os.path.join(DEFAULT_CONFIG_ROOT, filename)
        # TODO: Do not assume default config root
        os.makedirs(DEFAULT_CONFIG_ROOT, exist_ok=True)
        with open(filepath, 'w') as file:
            data = dict()
            for contract_name, contract_receipts in receipts.items():
                contract_records = dict()
                for tx_name, receipt in contract_receipts.items():
                    # Formatting
                    pretty_receipt = {item: str(result) for item, result in receipt.items()}
                    contract_records[tx_name] = pretty_receipt
                data[contract_name] = contract_records
            data = json.dumps(data, indent=4)
            file.write(data)
        return filepath
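
A hedged usage sketch of the ContractAdministrator flow above, assuming a contract registry and an unlocked deployer account; every name and argument value here is illustrative:

administrator = ContractAdministrator(registry=registry,
                                      deployer_address=deployer_address,
                                      client_password=client_password)
secrets = administrator.collect_deployment_secrets()   # one prompt per upgradeable contract
receipts = administrator.deploy_network_contracts(secrets=secrets,
                                                  interactive=False)  # no emitter needed when non-interactive
receipts_filepath = administrator.save_deployment_receipts(receipts=receipts)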
Exemple #31
0
class TesterBlockchain(Blockchain):
    """
    Blockchain subclass with additional test utility methods and options.
    """

    _PROVIDER_URI = 'tester://pyevm'
    _instance = NO_BLOCKCHAIN_AVAILABLE
    _test_account_cache = list()

    _default_test_accounts = NUMBER_OF_ETH_TEST_ACCOUNTS

    # Reserved addresses
    _ETHERBASE = 0
    _ALICE = 1
    _BOB = 2
    _FIRST_URSULA = 5
    _ursulas_range = range(NUMBER_OF_URSULAS_IN_BLOCKCHAIN_TESTS)

    def __init__(self,
                 test_accounts=None,
                 poa=True,
                 airdrop=False,
                 *args,
                 **kwargs):
        if test_accounts is None:
            test_accounts = self._default_test_accounts

        super().__init__(*args, **kwargs)
        self.log = Logger("test-blockchain")
        self.attach_middleware(w3=self.interface.w3, poa=poa)

        # Generate additional ethereum accounts for testing
        population = test_accounts
        enough_accounts = len(self.interface.w3.eth.accounts) >= population
        if not enough_accounts:
            accounts_to_make = population - len(self.interface.w3.eth.accounts)
            self.__generate_insecure_unlocked_accounts(
                quantity=accounts_to_make)
            assert test_accounts == len(self.interface.w3.eth.accounts)

        if airdrop is True:  # ETH for everyone!
            self.ether_airdrop(amount=DEVELOPMENT_ETH_AIRDROP_AMOUNT)

    @staticmethod
    def free_gas_price_strategy(w3, transaction_params=None):
        return 0

    def attach_middleware(self,
                          w3,
                          poa: bool = True,
                          free_transactions: bool = True):

        # For use with Proof-Of-Authority test-blockchains
        if poa:
            w3 = self.interface.w3
            w3.middleware_onion.inject(geth_poa_middleware, layer=0)

        # Free transaction gas!!
        if free_transactions:
            w3.eth.setGasPriceStrategy(self.free_gas_price_strategy)

    @classmethod
    def sever_connection(cls) -> None:
        cls._instance = NO_BLOCKCHAIN_AVAILABLE

    def __generate_insecure_unlocked_accounts(self,
                                              quantity: int) -> List[str]:
        """
        Generate additional unlocked accounts transferring a balance to each account on creation.
        """
        addresses = list()
        for _ in range(quantity):
            privkey = '0x' + os.urandom(32).hex()
            address = self.interface.provider.ethereum_tester.add_account(
                privkey)
            addresses.append(address)
            self._test_account_cache.append(address)
            self.log.info('Generated new insecure account {}'.format(address))

        return addresses

    def ether_airdrop(self, amount: int) -> List[str]:
        """Airdrops ether from creator address to all other addresses!"""

        coinbase, *addresses = self.interface.w3.eth.accounts

        tx_hashes = list()
        for address in addresses:

            tx = {'to': address, 'from': coinbase, 'value': amount}
            txhash = self.interface.w3.eth.sendTransaction(tx)

            _receipt = self.wait_for_receipt(txhash)
            tx_hashes.append(txhash)
            eth_amount = Web3().fromWei(amount, 'ether')
            self.log.info("Airdropped {} ETH {} -> {}".format(
                eth_amount, tx['from'], tx['to']))

        return tx_hashes

    def time_travel(self,
                    hours: int = None,
                    seconds: int = None,
                    periods: int = None):
        """
        Advance the chain by the specified duration (hours, seconds, or
        periods) by adjusting block timestamps, then mine a single block.
        """

        more_than_one_arg = sum(map(bool, (hours, seconds, periods))) > 1
        if more_than_one_arg:
            raise ValueError(
                "Specify hours, seconds, or lock_periods, not a combination")

        if periods:
            duration = (TokenEconomics.hours_per_period * periods) * (60 * 60)
            base = TokenEconomics.hours_per_period * 60 * 60
        elif hours:
            duration = hours * (60 * 60)
            base = 60 * 60
        elif seconds:
            duration = seconds
            base = 1
        else:
            raise ValueError("Specify either hours, seconds, or lock_periods.")

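        # Round the end timestamp down to a whole 'base' unit (a period, an
        # hour, or a second) so time travel lands on a clean boundary.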
        now = self.interface.w3.eth.getBlock(
            block_identifier='latest').timestamp
        end_timestamp = ((now + duration) // base) * base

        self.interface.w3.eth.web3.testing.timeTravel(timestamp=end_timestamp)
        self.interface.w3.eth.web3.testing.mine(1)
        self.log.info("Time traveled to {}".format(end_timestamp))

    @classmethod
    def connect(cls, *args, **kwargs) -> 'TesterBlockchain':
        interface = BlockchainDeployerInterface(
            provider_uri=cls._PROVIDER_URI,
            compiler=SolidityCompiler(test_contract_dir=CONTRACT_ROOT),
            registry=InMemoryEthereumContractRegistry())

        testerchain = TesterBlockchain(interface=interface, *args, **kwargs)
        return testerchain

    @classmethod
    def bootstrap_network(
            cls
    ) -> Tuple['TesterBlockchain', Dict[str, EthereumContractAgent]]:
        testerchain = cls.connect()

        origin = testerchain.interface.w3.eth.accounts[0]
        deployer = Deployer(blockchain=testerchain,
                            deployer_address=origin,
                            bare=True)

        random_deployment_secret = partial(
            os.urandom, DispatcherDeployer.DISPATCHER_SECRET_LENGTH)
        _txhashes, agents = deployer.deploy_network_contracts(
            miner_secret=random_deployment_secret(),
            policy_secret=random_deployment_secret(),
            adjudicator_secret=random_deployment_secret())
        return testerchain, agents

    @property
    def etherbase_account(self):
        return self.interface.w3.eth.accounts[self._ETHERBASE]

    @property
    def alice_account(self):
        return self.interface.w3.eth.accounts[self._ALICE]

    @property
    def bob_account(self):
        return self.interface.w3.eth.accounts[self._BOB]

    def ursula_account(self, index):
        if index not in self._ursulas_range:
            raise ValueError(
                f"Ursula index must be lower than {NUMBER_OF_URSULAS_IN_BLOCKCHAIN_TESTS}"
            )
        return self.interface.w3.eth.accounts[index + self._FIRST_URSULA]

    @property
    def ursulas_accounts(self):
        return [self.ursula_account(i) for i in self._ursulas_range]

    @property
    def unassigned_accounts(self):
        assigned_accounts = set(
            self.ursulas_accounts +
            [self.etherbase_account, self.alice_account, self.bob_account])
        accounts = set(self.interface.w3.eth.accounts)
        return list(accounts.difference(assigned_accounts))
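
A short usage sketch for this test chain, assuming the pyevm tester provider and compiled contracts are available locally; account indices follow the reserved addresses above:

testerchain, agents = TesterBlockchain.bootstrap_network()
testerchain.time_travel(periods=1)             # advance the chain one staking period
etherbase = testerchain.etherbase_account      # reserved account 0
ursulas = testerchain.ursulas_accounts         # accounts reserved for Ursulas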
Exemple #32
0
class PostgresListenerService(Service, object):
    """Listens for NOTIFY messages from postgres.

    A new connection is made to postgres with the isolation level of
    autocommit. This connection is only used for listening for notifications.
    Any query that needs to take place because of a notification should use
    its own connection. This class runs inside the reactor. Any long-running
    action that occurs based on a notification should defer its work to a
    thread so as not to block the reactor.

    :ivar connection: A database connection within one of Django's wrappers.
    :ivar connectionFileno: The fileno of the underlying database connection.
    :ivar connecting: a :class:`Deferred` while connecting, `None` at all
        other times.
    :ivar disconnecting: a :class:`Deferred` while disconnecting, `None`
        at all other times.
    """

    # Seconds to wait to handle new notifications. When the notifications set
    # is empty it will wait this amount of time to check again for new
    # notifications.
    HANDLE_NOTIFY_DELAY = 0.5
    CHANNEL_REGISTRAR_DELAY = 0.5

    def __init__(self, alias="default"):
        self.alias = alias
        self.listeners = defaultdict(list)
        self.autoReconnect = False
        self.connection = None
        self.connectionFileno = None
        self.notifications = set()
        self.notifier = task.LoopingCall(self.handleNotifies)
        self.notifierDone = None
        self.connecting = None
        self.disconnecting = None
        self.registeredChannels = set()
        self.channelRegistrar = task.LoopingCall(
            lambda: ensureDeferred(self.registerChannels()))
        self.channelRegistrarDone = None
        self.log = Logger(__name__, self)
        self.events = EventGroup("connected", "disconnected")
        # the connection object isn't threadsafe, so we need to lock in order
        # to use it in different threads
        self._db_lock = threading.RLock()

    def startService(self):
        """Start the listener."""
        super().startService()
        self.autoReconnect = True
        return self.tryConnection()

    def stopService(self):
        """Stop the listener."""
        super().stopService()
        self.autoReconnect = False
        return self.loseConnection()

    def connected(self):
        """Return True if connected."""
        if self.connection is None:
            return False
        if self.connection.connection is None:
            return False
        return self.connection.connection.closed == 0

    def logPrefix(self):
        """Return nice name for twisted logging.

        This is required to satisfy `IReadDescriptor`, which inherits from
        `ILoggingContext`.
        """
        return self.log.namespace

    def isSystemChannel(self, channel):
        """Return True if channel is a system channel."""
        return channel.startswith("sys_")

    def doRead(self):
        """Poll the connection and process any notifications."""
        with self._db_lock:
            try:
                self.connection.connection.poll()
            except Exception:
                # If the connection goes down then `OperationalError` is raised.
                # It contains no pgcode or pgerror to identify the reason so no
                # special consideration can be made for it. Hence all errors are
                # treated the same, and we assume that the connection is broken.
                #
                # We do NOT return a failure, which would signal to the reactor
                # that the connection is broken in some way, because the reactor
                # will end up removing this instance from its list of selectables
                # but not from its list of readable fds, or something like that.
                # The point is that the reactor's accounting gets muddled. Things
                # work correctly if we manage the disconnection ourselves.
                #
                self.loseConnection(Failure(error.ConnectionLost()))
            else:
                self._process_notifies()

    def fileno(self):
        """Return the fileno of the connection."""
        return self.connectionFileno

    def startReading(self):
        """Add this listener to the reactor."""
        self.connectionFileno = self.connection.connection.fileno()
        reactor.addReader(self)

    def stopReading(self):
        """Remove this listener from the reactor."""
        try:
            reactor.removeReader(self)
        except IOError as error:
            # ENOENT here means that the fd has already been unregistered
            # from the underlying poller. It is as yet unclear how we get
            # into this state, so for now we ignore it. See epoll_ctl(2).
            if error.errno != ENOENT:
                raise
        finally:
            self.connectionFileno = None

    def register(self, channel, handler):
        """Register listening for notifications from a channel.

        When a notification is received for that `channel` the `handler` will
        be called with the action and object id.
        """
        self.log.debug(f"Register on {channel} with handler {handler}")
        handlers = self.listeners[channel]
        if self.isSystemChannel(channel) and len(handlers) > 0:
            # A system channel can only be registered once. This is because the
            # message is passed directly to the handler, and the `doRead`
            # method does not wait for it to finish if it's a Deferred. This is
            # different from normal handlers, where we call each one and wait
            # for all to resolve before continuing to the next event.
            raise PostgresListenerRegistrationError(
                "System channel '%s' has already been registered." % channel)
        else:
            handlers.append(handler)
        self.runChannelRegistrar()

    def unregister(self, channel, handler):
        """Unregister listening for notifications from a channel.

        `handler` needs to be same handler that was registered.
        """
        self.log.debug(f"Unregister on {channel} with handler {handler}")
        if channel not in self.listeners:
            raise PostgresListenerUnregistrationError(
                "Channel '%s' is not registered with the listener." % channel)
        handlers = self.listeners[channel]
        if handler in handlers:
            handlers.remove(handler)
        else:
            raise PostgresListenerUnregistrationError(
                "Handler is not registered on that channel '%s'." % channel)
        if len(handlers) == 0:
            # Channels have already been registered. Unregister the channel.
            del self.listeners[channel]
        self.runChannelRegistrar()

    @synchronous
    def createConnection(self):
        """Create new database connection."""
        db = connections.databases[self.alias]
        backend = load_backend(db["ENGINE"])
        return backend.DatabaseWrapper(db, self.alias)

    @synchronous
    def startConnection(self):
        """Start the database connection."""
        self.connection = self.createConnection()
        self.connection.connect()
        self.connection.set_autocommit(True)
        self.connection.inc_thread_sharing()

    @synchronous
    def stopConnection(self):
        """Stop database connection."""
        # The connection is often in an unexpected state here -- for
        # unexplained reasons -- so be careful when peeling back its layers.
        connection_wrapper, self.connection = self.connection, None
        if connection_wrapper is not None:
            connection = connection_wrapper.connection
            if connection is not None and not connection.closed:
                connection_wrapper.dec_thread_sharing()
                connection_wrapper.commit()
                connection_wrapper.close()

    def tryConnection(self):
        """Keep retrying to make the connection."""
        if self.connecting is None:
            if self.disconnecting is not None:
                raise RuntimeError(
                    "Cannot attempt to make new connection before "
                    "pending disconnection has finished.")

            def cb_connect(_):
                self.log.info("Listening for database notifications.")

            def eb_connect(failure):
                self.log.error(
                    "Unable to connect to database: {error}",
                    error=failure.getErrorMessage(),
                )
                if failure.check(CancelledError):
                    return failure
                elif self.autoReconnect:
                    return deferLater(reactor, 3, connect)
                else:
                    return failure

            def connect(interval=self.HANDLE_NOTIFY_DELAY):
                d = deferToThread(self.startConnection)
                d.addCallback(callOut, self.runChannelRegistrar)
                d.addCallback(lambda result: self.channelRegistrarDone)
                d.addCallback(callOut, self.events.connected.fire)
                d.addCallback(callOut, self.startReading)
                d.addCallback(callOut, self.runHandleNotify, interval)
                # On failure ensure that the database connection is stopped.
                d.addErrback(callOut, deferToThread, self.stopConnection)
                d.addCallbacks(cb_connect, eb_connect)
                return d

            def done():
                self.connecting = None

            self.connecting = connect().addBoth(callOut, done)

        return self.connecting

    def loseConnection(self, reason=Failure(error.ConnectionDone())):
        """Request that the connection be dropped."""
        if self.disconnecting is None:
            self.registeredChannels.clear()
            d = self.disconnecting = Deferred()
            d.addBoth(callOut, self.stopReading)
            d.addBoth(callOut, self.cancelChannelRegistrar)
            d.addBoth(callOut, self.cancelHandleNotify)
            d.addBoth(callOut, deferToThread, self.stopConnection)
            d.addBoth(callOut, self.connectionLost, reason)

            def done():
                self.disconnecting = None

            d.addBoth(callOut, done)

            if self.connecting is None:
                # Already/never connected: begin shutdown now.
                self.disconnecting.callback(None)
            else:
                # Still connecting: cancel before disconnect.
                self.connecting.addErrback(suppress, CancelledError)
                self.connecting.chainDeferred(self.disconnecting)
                self.connecting.cancel()

        return self.disconnecting

    def connectionLost(self, reason):
        """Reconnect when the connection is lost."""
        self.connection = None
        if reason.check(error.ConnectionDone):
            self.log.debug("Connection closed.")
        elif reason.check(error.ConnectionLost):
            self.log.debug("Connection lost.")
        else:
            self.log.failure("Connection lost.", reason)
        if self.autoReconnect:
            reactor.callLater(3, self.tryConnection)
        self.events.disconnected.fire(reason)

    def registerChannel(self, channel):
        """Register the channel."""
        self.log.debug(f"Register Channel {channel}")
        with self._db_lock, self.connection.cursor() as cursor:
            if self.isSystemChannel(channel):
                # This is a system channel so listen only called once.
                cursor.execute("LISTEN %s;" % channel)
            else:
                # Not a system channel so listen called once for each action.
                for action in sorted(map_enum(ACTIONS).values()):
                    cursor.execute("LISTEN %s_%s;" % (channel, action))

    def unregisterChannel(self, channel):
        """Unregister the channel."""
        self.log.debug(f"Unregister Channel {channel}")
        with self._db_lock, self.connection.cursor() as cursor:
            if self.isSystemChannel(channel):
                # This is a system channel so unlisten only called once.
                cursor.execute("UNLISTEN %s;" % channel)
            else:
                # Not a system channel so unlisten called once for each action.
                for action in sorted(map_enum(ACTIONS).values()):
                    cursor.execute("UNLISTEN %s_%s;" % (channel, action))

    async def registerChannels(self):
        """Listen/unlisten to channels that were registered/unregistered.

        When a call to register() or unregister() is made, the listeners
        dict is updated, and the keys of that dict represents all the
        channels that we should listen to.

        The service keeps a list of channels that it already listens to
        in the registeredChannels dict. We issue a call to postgres to
        listen to all channels that are in listeners but not in
        registeredChannels, and a call to unlisten for all channels that
        are in registeredChannels but not in listeners.
        """
        to_register = set(self.listeners).difference(self.registeredChannels)
        to_unregister = self.registeredChannels.difference(self.listeners)
        # If there's nothing to do, we can stop the loop. If there is
        # any work to be done, we do the work, and then check
        # whether we should stop at the beginning of the next loop
        # iteration. The reason is that every time we yield, another
        # deferred might call register() or unregister().
        if not to_register and not to_unregister:
            self.channelRegistrar.stop()
        else:
            for channel in to_register:
                await deferToThread(self.registerChannel, channel)
                self.registeredChannels.add(channel)
            for channel in to_unregister:
                await deferToThread(self.unregisterChannel, channel)
                self.registeredChannels.remove(channel)

    def convertChannel(self, channel):
        """Convert the postgres channel to a registered channel and action.

        :raise PostgresListenerNotifyError: When {channel} is not registered or
            {action} is not in `ACTIONS`.
        """
        channel, action = channel.split("_", 1)
        if channel not in self.listeners:
            raise PostgresListenerNotifyError(
                "%s is not a registered channel." % channel)
        if action not in map_enum(ACTIONS).values():
            raise PostgresListenerNotifyError("%s action is not supported." %
                                              action)
        return channel, action

    def runChannelRegistrar(self):
        """Start the loop for listening to channels in postgres.

        It will only start if the service is connected to postgres.
        """
        if self.connection is not None and not self.channelRegistrar.running:
            self.channelRegistrarDone = self.channelRegistrar.start(
                self.CHANNEL_REGISTRAR_DELAY, now=True)

    def cancelChannelRegistrar(self):
        """Stop the loop for listening to channels in postgres."""
        if self.channelRegistrar.running:
            self.channelRegistrar.stop()
            return self.channelRegistrarDone
        else:
            return succeed(None)

    def runHandleNotify(self, delay=0, clock=reactor):
        """Defer later the `handleNotify`."""
        if not self.notifier.running:
            self.notifierDone = self.notifier.start(delay, now=False)

    def cancelHandleNotify(self):
        """Cancel the deferred `handleNotify` call."""
        if self.notifier.running:
            self.notifier.stop()
            return self.notifierDone
        else:
            return succeed(None)

    def handleNotifies(self, clock=reactor):
        """Process all notify message in the notifications set."""
        def gen_notifications(notifications):
            while len(notifications) != 0:
                yield notifications.pop()

        return task.coiterate(
            self.handleNotify(notification, clock=clock)
            for notification in gen_notifications(self.notifications))

    def handleNotify(self, notification, clock=reactor):
        """Process a notify message in the notifications set."""
        channel, payload = notification
        try:
            channel, action = self.convertChannel(channel)
        except PostgresListenerNotifyError:
            # Log the error and continue processing the remaining
            # notifications.
            self.log.failure("Failed to convert channel {channel!r}.",
                             channel=channel)
        else:
            defers = []
            handlers = self.listeners[channel]
            # XXX: There could be an arbitrary number of listeners. Should we
            # limit concurrency here? Perhaps even do one at a time.
            for handler in handlers:
                d = defer.maybeDeferred(handler, action, payload)
                d.addErrback(lambda failure: self.log.failure(
                    "Failure while handling notification to {channel!r}: "
                    "{payload!r}",
                    failure,
                    channel=channel,
                    payload=payload,
                ))
                defers.append(d)
            return defer.DeferredList(defers)

    def _process_notifies(self):
        """Add each notify to to the notifications set.

        This removes duplicate notifications when one entity in the database is
        updated multiple times in a short interval. Accumulating notifications
        and allowing the listener to pick them up in batches is imperfect but
        good enough, and simple.

        """
        notifies = self.connection.connection.notifies
        for notify in notifies:
            if self.isSystemChannel(notify.channel):
                # System level message; pass it to the registered
                # handler immediately.
                if notify.channel in self.listeners:
                    # Be defensive: if no handler exists for this channel,
                    # then the channel should be unregistered and removed
                    # from listeners.
                    if len(self.listeners[notify.channel]) > 0:
                        handler = self.listeners[notify.channel][0]
                        handler(notify.channel, notify.payload)
                    else:
                        self.unregisterChannel(notify.channel)
                        del self.listeners[notify.channel]
                else:
                    # Unregister the channel since no listener is
                    # registered for this channel.
                    self.unregisterChannel(notify.channel)
            else:
                # Place non-system messages into the queue to be
                # processed.
                self.notifications.add((notify.channel, notify.payload))
        # Delete the contents of the connection's notifies list so
        # that we don't process them a second time.
        del notifies[:]
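
A minimal sketch of wiring a handler into the listener above, assuming a configured Django database alias and a running Twisted reactor; the channel name and handler body are illustrative:

listener = PostgresListenerService(alias="default")

def on_node_change(action, obj_id):
    # Called once per NOTIFY on a "node_<action>" channel; may return a Deferred.
    print("node %s: %s" % (obj_id, action))

listener.register("node", on_node_change)   # LISTENs node_<action> for each ACTIONS value
listener.startService()                     # connects and begins reading notifications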
Exemple #33
0
class TesterBlockchain(Blockchain):
    """
    Blockchain subclass with additional test utility methods and options.
    """

    _PROVIDER_URI = 'tester://pyevm'
    _instance = NO_BLOCKCHAIN_AVAILABLE
    _test_account_cache = list()
    _default_test_accounts = 10

    def __init__(self, test_accounts=None, poa=True, airdrop=False, *args, **kwargs):
        if test_accounts is None:
            test_accounts = self._default_test_accounts

        super().__init__(*args, **kwargs)

        self.log = Logger("test-blockchain")  # type: Logger

        # For use with Proof-Of-Authority test-blockchains
        if poa is True:
            w3 = self.interface.w3
            w3.middleware_onion.inject(geth_poa_middleware, layer=0)

        # Generate additional ethereum accounts for testing
        enough_accounts = len(self.interface.w3.eth.accounts) >= NUMBER_OF_URSULAS_IN_DEVELOPMENT_NETWORK
        if not enough_accounts:  # test_accounts was already defaulted above, so it is never None here
            accounts_to_make = NUMBER_OF_URSULAS_IN_DEVELOPMENT_NETWORK - len(self.interface.w3.eth.accounts)
            self.__generate_insecure_unlocked_accounts(quantity=accounts_to_make)
            assert test_accounts == len(self.interface.w3.eth.accounts)

        if airdrop is True:  # ETH for everyone!
            self.ether_airdrop(amount=DEVELOPMENT_ETH_AIRDROP_AMOUNT)

    @classmethod
    def sever_connection(cls) -> None:
        cls._instance = NO_BLOCKCHAIN_AVAILABLE

    def __generate_insecure_unlocked_accounts(self, quantity: int) -> List[str]:
        """
        Generate additional unlocked accounts transferring a balance to each account on creation.
        """
        addresses = list()
        insecure_password = INSECURE_DEVELOPMENT_PASSWORD
        for _ in range(quantity):

            umbral_priv_key = UmbralPrivateKey.gen_key()
            address = self.interface.w3.personal.importRawKey(private_key=umbral_priv_key.to_bytes(),
                                                              passphrase=insecure_password)

            assert self.interface.unlock_account(address, password=insecure_password, duration=None), 'Failed to unlock {}'.format(address)
            addresses.append(address)
            self._test_account_cache.append(address)
            self.log.info('Generated new insecure account {}'.format(address))

        return addresses

    def ether_airdrop(self, amount: int) -> List[str]:
        """Airdrops ether from creator address to all other addresses!"""

        coinbase, *addresses = self.interface.w3.eth.accounts

        tx_hashes = list()
        for address in addresses:

            tx = {'to': address, 'from': coinbase, 'value': amount}
            txhash = self.interface.w3.eth.sendTransaction(tx)

            _receipt = self.wait_for_receipt(txhash)
            tx_hashes.append(txhash)
            self.log.info("Airdropped {} ETH {} -> {}".format(amount, tx['from'], tx['to']))

        return tx_hashes

    def time_travel(self, hours: int=None, seconds: int=None, periods: int=None):
        """
        Advance the chain by the specified duration (hours, seconds, or
        periods) by adjusting block timestamps, then mine a single block.
        """

        more_than_one_arg = sum(map(bool, (hours, seconds, periods))) > 1
        if more_than_one_arg:
            raise ValueError("Specify hours, seconds, or lock_periods, not a combination")

        if periods:
            duration = (constants.HOURS_PER_PERIOD * periods) * (60*60)
            base = constants.HOURS_PER_PERIOD * 60 * 60
        elif hours:
            duration = hours * (60*60)
            base = 60 * 60
        elif seconds:
            duration = seconds
            base = 1
        else:
            raise ValueError("Specify either hours, seconds, or lock_periods.")

        now = self.interface.w3.eth.getBlock(block_identifier='latest').timestamp
        end_timestamp = ((now+duration)//base) * base

        self.interface.w3.eth.web3.testing.timeTravel(timestamp=end_timestamp)
        self.interface.w3.eth.web3.testing.mine(1)
        self.log.info("Time traveled to {}".format(end_timestamp))

    @classmethod
    def connect(cls, *args, **kwargs) -> 'TesterBlockchain':
        interface = BlockchainDeployerInterface(provider_uri=cls._PROVIDER_URI,
                                                compiler=SolidityCompiler(test_contract_dir=CONTRACT_ROOT),
                                                registry=InMemoryEthereumContractRegistry())

        testerchain = TesterBlockchain(interface=interface, *args, **kwargs)
        return testerchain

    @classmethod
    def bootstrap_network(cls) -> Tuple['TesterBlockchain', Dict[str, EthereumContractAgent]]:
        testerchain = cls.connect()

        origin = testerchain.interface.w3.eth.accounts[0]
        deployer = Deployer(blockchain=testerchain, deployer_address=origin, bare=True)

        random_deployment_secret = partial(os.urandom, DISPATCHER_SECRET_LENGTH)
        _txhashes, agents = deployer.deploy_network_contracts(miner_secret=random_deployment_secret(),
                                                              policy_secret=random_deployment_secret())
        return testerchain, agents
Exemple #34
0
class BlockchainInterface:
    """
    Interacts with a solidity compiler and a registry in order to instantiate compiled
    ethereum contracts with the given web3 provider backend.
    """
    __default_timeout = 180  # seconds
    __default_transaction_gas = 500_000  # TODO #842: determine sensible limit and validate transactions

    process = None  # TODO
    Web3 = Web3

    class InterfaceError(Exception):
        pass

    class NoProvider(InterfaceError):
        pass

    class ConnectionFailed(InterfaceError):
        pass

    class UnknownContract(InterfaceError):
        pass

    def __init__(self,
                 provider_uri: str = None,
                 provider=None,
                 timeout: int = None,
                 registry: EthereumContractRegistry = None,
                 fetch_registry: bool = True,
                 compiler: SolidityCompiler = None) -> None:
        """
        A blockchain "network interface"; The circumflex wraps entirely around the bounds of
        contract operations including compilation, deployment, and execution.

         Filesystem          Configuration           Node              Client                  EVM
        ================ ====================== =============== =====================  ===========================

         Solidity Files -- SolidityCompiler ---                  --- HTTPProvider ------ ...
                                               |                |
                                               |                |

                                              *BlockchainInterface* -- IPCProvider ----- External EVM (geth, parity...)

                                               |      |         |
                                               |      |         |
         Registry File -- ContractRegistry ---        |          ---- TestProvider ----- EthereumTester
                                                      |
                        |                             |                                         |
                        |                             |
                                                                                        PyEVM (Development Chain)
         Runtime Files --                 -------- Blockchain
                                         |
                        |                |             |

         Key Files ------ NodeConfiguration -------- Agent ... (Contract API)

                        |                |             |
                        |                |
                        |                 ---------- Actor ... (Blockchain-Character API)
                        |
                        |                              |
                        |
         Config File ---                          Character ... (Public API)

                                                       |

                                                     Human


        The BlockchainInterface is the junction of the solidity compiler, a contract registry, and a collection of
        web3 network providers as a means of interfacing with the ethereum blockchain to execute
        or deploy contract code on the network.


        Compiler and Registry Usage
        -----------------------------

        Contracts are freshly re-compiled if an instance of SolidityCompiler is passed; otherwise,
        the registry will read contract data saved to disk, which is used to retrieve contract
        addresses and op-codes. Optionally, a registry instance can be passed instead.


        Provider Usage
        ---------------
        https://github.com/ethereum/eth-tester  # available-backends


        * HTTP Provider - Web3 HTTP provider, typically JSON RPC 2.0 over HTTP
        * Websocket Provider - Web3 WS provider, typically JSON RPC 2.0 over WS, supply endpoint uri and websocket=True
        * IPC Provider - Web3 File based IPC provider transported over standard I/O
        * Custom Provider - A pre-initialized web3.py provider instance to attach to this interface

        """

        self.log = Logger("blockchain-interface")

        self.client = NO_BLOCKCHAIN_CONNECTION
        self.__provider = provider or NO_BLOCKCHAIN_CONNECTION
        self.provider_uri = NO_BLOCKCHAIN_CONNECTION
        self.timeout = timeout if timeout is not None else self.__default_timeout
        self.registry = registry

        # Connect to Provider
        self._connect(provider=provider, provider_uri=provider_uri)

        # Establish contact with NuCypher contracts
        if not registry:
            self._configure_registry(fetch_registry=fetch_registry)
        self._setup_solidity(compiler=compiler)

    def __repr__(self):
        r = '{name}({uri})'.format(name=self.__class__.__name__,
                                   uri=self.provider_uri)
        return r

    def __getattr__(self, name):
        """

        MAGIC...

        allows the interface class to defer to methods of its client
        or its client.w3

        for example:
            methods/properties of w3 can be called through, e.g., interface.toWei();
            if a particular eth provider needs a different method,
            override that method for that provider's client
        """

        # does BlockchainInterface have this attr/method?
        if name not in self.__dict__:

            # do we have a client?
            if self.client is not NO_BLOCKCHAIN_CONNECTION:

                # does the client have this property/method?
                # most likely it is because of an implementation difference
                # between parity/geth/etc.
                if hasattr(self.client, name):
                    return getattr(self.client, name)

                # ok, does w3 have it?
                if hasattr(self.client.w3, name):
                    return getattr(self.client.w3, name)

        # return the default getattr behavior (could be an AttributeError)
        return object.__getattribute__(self, name)
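
    # A minimal sketch of the deferral above (the URI and values are
    # hypothetical):
    #   interface = BlockchainInterface(provider_uri='http://localhost:8545')
    #   interface.node_version        # not on the interface -> found on the client
    #   interface.toWei(1, 'ether')   # not on the client either -> client.w3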

    @property
    def client_version(self) -> str:
        if self.__provider is NO_BLOCKCHAIN_CONNECTION:
            return "Unknown"

        return self.client.node_version

    def _connect(self,
                 provider: Web3Providers = None,
                 provider_uri: str = None):

        self._attach_provider(provider=provider, provider_uri=provider_uri)
        self.log.info("Connecting to {}".format(self.provider_uri))

        if self.__provider is NO_BLOCKCHAIN_CONNECTION:
            raise self.NoProvider(
                "There are no configured blockchain providers")

        # Connect if not connected
        try:
            w3 = self.Web3(provider=self.__provider)
            self.client = Web3Client.from_w3(w3=w3)

        except requests.ConnectionError:  # RPC
            raise self.ConnectionFailed(
                f'Connection Failed - {str(self.provider_uri)} - is RPC enabled?'
            )

        except FileNotFoundError:  # IPC File Protocol
            raise self.ConnectionFailed(
                f'Connection Failed - {str(self.provider_uri)} - is IPC enabled?'
            )

        # Check connection
        return self.is_connected

    @property
    def provider(self) -> Union[IPCProvider, WebsocketProvider, HTTPProvider]:
        return self.__provider

    @property
    def is_connected(self) -> bool:
        """
        https://web3py.readthedocs.io/en/stable/providers.html#examples-using-automated-detection
        """
        return self.client.is_connected

    @property
    def _node_technology(self):
        if self.client:
            return self.client.node_technology
        return NO_BLOCKCHAIN_CONNECTION

    def _configure_registry(self, fetch_registry: bool = True):
        RegistryClass = EthereumContractRegistry._get_registry_class(
            local=self.client.is_local)
        if fetch_registry:
            registry = RegistryClass.from_latest_publication()
        else:
            registry = RegistryClass()

        self.registry = registry

    def _setup_solidity(self, compiler: SolidityCompiler = None):

        # If a SolidityCompiler instance was passed, compile from Solidity source code
        self.__recompile = compiler is not None
        self.__sol_compiler = compiler

        self.log.info("Using contract registry {}".format(
            self.registry.filepath))

        if self.__recompile is True:
            # Execute the compilation if we're recompiling
            # Otherwise read compiled contract data from the registry
            interfaces = self.__sol_compiler.compile()
            __raw_contract_cache = interfaces
        else:
            __raw_contract_cache = NO_COMPILATION_PERFORMED
        self.__raw_contract_cache = __raw_contract_cache

    def _attach_provider(self,
                         provider: Web3Providers = None,
                         provider_uri: str = None) -> None:
        """
        https://web3py.readthedocs.io/en/latest/providers.html#providers
        """

        self.provider_uri = provider_uri or NO_BLOCKCHAIN_CONNECTION

        if not provider_uri and not provider:
            raise self.NoProvider("No URI or provider instances supplied.")

        if provider_uri and not provider:
            uri_breakdown = urlparse(provider_uri)

            if uri_breakdown.scheme == 'tester':
                providers = {
                    'pyevm': self._get_tester_pyevm,
                    'geth': self._get_test_geth_parity_provider,
                    'parity-ethereum': self._get_test_geth_parity_provider,
                }
                lookup_attr = uri_breakdown.netloc
            else:
                providers = {
                    'auto': self._get_auto_provider,
                    'infura': self._get_infura_provider,
                    'ipc': self._get_IPC_provider,
                    'file': self._get_IPC_provider,
                    'ws': self._get_websocket_provider,
                    'http': self._get_HTTP_provider,
                    'https': self._get_HTTP_provider,
                }
                lookup_attr = uri_breakdown.scheme
            try:
                self.__provider = providers[lookup_attr]()
            except KeyError:
                raise ValueError("{} is an invalid or unsupported blockchain"
                                 " provider URI".format(provider_uri))

    def _get_IPC_provider(self):
        uri_breakdown = urlparse(self.provider_uri)
        return IPCProvider(ipc_path=uri_breakdown.path, timeout=self.timeout)

    def _get_HTTP_provider(self):
        return HTTPProvider(endpoint_uri=self.provider_uri)

    def _get_websocket_provider(self):
        return WebsocketProvider(endpoint_uri=self.provider_uri)

    def _get_infura_provider(self):
        # https://web3py.readthedocs.io/en/latest/providers.html#infura-mainnet
        infura_envvar = 'WEB3_INFURA_API_SECRET'
        if infura_envvar not in os.environ:
            raise self.InterfaceError(
                f'{infura_envvar} must be set in order to use an Infura Web3 provider.'
            )
        from web3.auto.infura import w3
        connected = w3.isConnected()
        if not connected:
            raise self.InterfaceError(
                'Cannot auto-detect node.  Provide a full URI instead.')
        return w3.provider

    def _get_auto_provider(self):

        from web3.auto import w3
        # how-automated-detection-works: https://web3py.readthedocs.io/en/latest/providers.html
        connected = w3.isConnected()
        if not connected:
            raise self.InterfaceError(
                'Cannot auto-detect node.  Provide a full URI instead.')
        return w3.provider

    def _get_tester_pyevm(self):
        # https://web3py.readthedocs.io/en/latest/providers.html#httpprovider
        from nucypher.utilities.sandbox.constants import PYEVM_GAS_LIMIT, NUMBER_OF_ETH_TEST_ACCOUNTS

        # Initialize
        genesis_params = PyEVMBackend._generate_genesis_params(
            overrides={'gas_limit': PYEVM_GAS_LIMIT})
        pyevm_backend = PyEVMBackend(genesis_parameters=genesis_params)
        pyevm_backend.reset_to_genesis(
            genesis_params=genesis_params,
            num_accounts=NUMBER_OF_ETH_TEST_ACCOUNTS)

        # Test provider entry-point
        eth_tester = EthereumTester(backend=pyevm_backend,
                                    auto_mine_transactions=True)
        provider = EthereumTesterProvider(ethereum_tester=eth_tester)

        return provider

    def _get_test_geth_parity_provider(self):
        # geth --dev
        geth_process = NuCypherGethDevProcess()
        geth_process.start()
        geth_process.wait_for_ipc(timeout=30)
        provider = IPCProvider(ipc_path=geth_process.ipc_path,
                               timeout=self.timeout)

        #  TODO: this seems strange to modify a class attr here?
        BlockchainInterface.process = geth_process

        return provider

    def _get_tester_ganache(self, endpoint_uri=None):

        endpoint_uri = endpoint_uri or 'http://localhost:7545'
        return HTTPProvider(endpoint_uri=endpoint_uri)

    @classmethod
    def disconnect(cls):
        if BlockchainInterface.process:
            if BlockchainInterface.process.is_running:
                BlockchainInterface.process.stop()

    def get_contract_factory(self, contract_name: str) -> Contract:
        """Retrieve compiled interface data from the cache and return web3 contract"""
        try:
            interface = self.__raw_contract_cache[contract_name]
        except KeyError:
            raise self.UnknownContract(
                '{} is not a locally compiled contract.'.format(contract_name))
        except TypeError:
            if self.__raw_contract_cache is NO_COMPILATION_PERFORMED:
                message = "The local contract compiler cache is empty because no compilation was performed."
                raise self.InterfaceError(message)
            raise
        else:
            contract = self.client.w3.eth.contract(
                abi=interface['abi'],
                bytecode=interface['bin'],
                ContractFactoryClass=Contract)
            return contract

    def _wrap_contract(self,
                       wrapper_contract: Contract,
                       target_contract: Contract,
                       factory=Contract) -> Contract:
        """
        Used for upgradeable contracts; Returns a new contract object assembled
        with its own address but the abi of the other.
        """

        # Wrap the contract
        wrapped_contract = self.client.w3.eth.contract(
            abi=target_contract.abi,
            address=wrapper_contract.address,
            ContractFactoryClass=factory)
        return wrapped_contract
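
    # The object returned above sends calls to the wrapper's (proxy's) address
    # while encoding/decoding them with the target's ABI, e.g. (hypothetical
    # names):
    #   wrapped = interface._wrap_contract(dispatcher, staking_escrow)
    #   assert wrapped.address == dispatcher.address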

    def get_contract_by_address(self, address: str):
        """Read a single contract's data from the registrar and return it."""
        try:
            contract_records = self.registry.search(contract_address=address)
        except RuntimeError:
            # TODO #461: Integrate with Registry
            raise self.InterfaceError(
                f'Corrupted contract registry: {self.registry.filepath}.')
        else:
            if not contract_records:
                raise self.UnknownContract(
                    f"No such contract with address: {address}.")
            return contract_records[0]

    def get_proxy(self,
                  target_address: str,
                  proxy_name: str,
                  factory: Contract = Contract):

        # Lookup proxies; Search for a registered proxy that targets this contract record
        records = self.registry.search(contract_name=proxy_name)

        dispatchers = list()
        for name, addr, abi in records:
            proxy_contract = self.client.w3.eth.contract(
                abi=abi, address=addr, ContractFactoryClass=factory)

            # Read this dispatcher's target address from the blockchain
            proxy_live_target_address = proxy_contract.functions.target().call()

            if proxy_live_target_address == target_address:
                dispatchers.append(proxy_contract)

        if len(dispatchers) > 1:
            message = f"Multiple Dispatcher deployments are targeting {target_address}"
            raise self.InterfaceError(message)

        try:
            return dispatchers[0]
        except IndexError:
            raise self.UnknownContract(
                f"No registered Dispatcher deployments target {target_address}"
            )

    def get_contract_by_name(
            self,
            name: str,
            proxy_name: str = None,
            use_proxy_address: bool = True,
            factory: Contract = Contract) -> Union[Contract, List[tuple]]:
        """
        Instantiate a deployed contract from registry data,
        and assimilate it with its proxy if it is upgradeable,
        or return all registered records if use_proxy_address is False.
        """
        target_contract_records = self.registry.search(contract_name=name)

        if not target_contract_records:
            raise self.UnknownContract(
                f"No such contract records with name {name}.")

        if proxy_name:  # It's upgradeable
            # Lookup proxies; Search for a published proxy that targets this contract record

            proxy_records = self.registry.search(contract_name=proxy_name)

            results = list()
            for proxy_name, proxy_addr, proxy_abi in proxy_records:
                proxy_contract = self.client.w3.eth.contract(
                    abi=proxy_abi,
                    address=proxy_addr,
                    ContractFactoryClass=factory)

                # Read this dispatcher's target address from the blockchain
                proxy_live_target_address = proxy_contract.functions.target().call()
                for target_name, target_addr, target_abi in target_contract_records:

                    if target_addr == proxy_live_target_address:
                        if use_proxy_address:
                            pair = (proxy_addr, target_abi)
                        else:
                            pair = (proxy_live_target_address, target_abi)
                    else:
                        continue

                    results.append(pair)

            if len(results) > 1:
                address, abi = results[0]
                message = "Multiple {} deployments are targeting {}".format(
                    proxy_name, address)
                raise self.InterfaceError(message)

            else:
                selected_address, selected_abi = results[0]

        else:  # It's not upgradeable
            if len(target_contract_records) != 1:
                m = "Multiple records registered for non-upgradeable contract {}"
                raise self.InterfaceError(m.format(name))
            _target_contract_name, selected_address, selected_abi = target_contract_records[0]

        # Create the contract from selected sources
        unified_contract = self.client.w3.eth.contract(
            abi=selected_abi,
            address=selected_address,
            ContractFactoryClass=factory)

        return unified_contract
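
A minimal usage sketch of the interface above, assuming a local JSON-RPC node on
port 8545 and a contract name that exists in the fetched registry (both the URI
and the name 'NuCypherToken' are illustrative assumptions):

interface = BlockchainInterface(provider_uri='http://localhost:8545')
print(interface.client_version)                               # proxied to the client via __getattr__
token = interface.get_contract_by_name(name='NuCypherToken')  # registry lookup
print(token.address)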
Exemple #35
0
class BlockchainInterface:
    """
    Interacts with a solidity compiler and a registry in order to instantiate compiled
    ethereum contracts with the given web3 provider backend.
    """
    __default_timeout = 10  # seconds

    # __default_transaction_gas_limit = 500000  # TODO: determine sensible limit and validate transactions

    class InterfaceError(Exception):
        pass

    class NoProvider(InterfaceError):
        pass

    class ConnectionFailed(InterfaceError):
        pass

    class UnknownContract(InterfaceError):
        pass

    def __init__(self,
                 provider_uri: str = None,
                 provider=None,
                 auto_connect: bool = True,
                 timeout: int = None,
                 registry: EthereumContractRegistry = None,
                 compiler: SolidityCompiler = None) -> None:
        """
        A blockchain "network inerface"; The circumflex wraps entirely around the bounds of
        contract operations including compilation, deployment, and execution.

         Filesystem          Configuration           Node              Client                  EVM
        ================ ====================== =============== =====================  ===========================

         Solidity Files -- SolidityCompiler ---                  --- HTTPProvider ------ ...
                                               |                |
                                               |                |
                                              *BlockchainInterface* -- IPCProvider ----- External EVM (geth, parity...)
                                                      |         |
                                                      |          ---- TestProvider ----- EthereumTester
         Registry File -- ContractRegistry ---        |                                        |
                                                      |                                        |
                                                      |                                 PyEVM (Development Chain)
                                                      |
         Runtime Files --                 -------- Blockchain
                        |                |             |
                        |                |             |
         Key Files ------ NodeConfiguration -------- Agent ... (Contract API)
                        |                |             |
                        |                |             |
                        |                 ---------- Actor ... (Blockchain-Character API)
                        |                              |
                        |                              |
         Config File ---                          Character ... (Public API)
                                                       |
                                                       |
                                                     Human


        The BlockchainInterface is the junction of the Solidity compiler, a contract registry, and a
        collection of web3 network providers, providing a means of interfacing with the Ethereum
        blockchain to execute or deploy contract code on the network.


        Compiler and Registry Usage
        -----------------------------

        Contracts are freshly re-compiled if an instance of SolidityCompiler is passed; otherwise,
        the registry reads contract data saved to disk, which is used to retrieve contract
        addresses and op-codes. Optionally, a registry instance can be passed instead.


        Provider Usage
        ---------------
        https://github.com/ethereum/eth-tester  # available-backends


        * HTTP Provider - supply endpoint_uri
        * Websocket Provider - supply the endpoint URI and websocket=True
        * IPC Provider - supply the IPC path
        * Custom Provider - supply an iterable of web3.py provider instances

        """

        self.log = Logger("blockchain-interface")  # type: Logger

        #
        # Providers
        #

        self.w3 = NO_BLOCKCHAIN_CONNECTION
        self.__provider = provider or NO_BLOCKCHAIN_CONNECTION
        self.provider_uri = NO_BLOCKCHAIN_CONNECTION
        self.timeout = timeout if timeout is not None else self.__default_timeout

        if provider_uri and provider:
            raise self.InterfaceError(
                "Pass a provider URI string, or a list of provider instances.")
        elif provider_uri:
            self.provider_uri = provider_uri
            self.add_provider(provider_uri=provider_uri)
        elif provider:
            self.provider_uri = MANUAL_PROVIDERS_SET
            self.add_provider(provider)
        else:
            self.log.warn(
                "No provider supplied for new blockchain interface; Using defaults"
            )

        # If a SolidityCompiler instance was passed, compile from Solidity source code
        self.__recompile = compiler is not None
        self.__sol_compiler = compiler

        # Setup the registry and base contract factory cache
        registry = registry if registry is not None else EthereumContractRegistry()
        self.registry = registry
        self.log.info("Using contract registry {}".format(
            self.registry.filepath))

        if self.__recompile is True:
            # Execute the compilation if we're recompiling
            # Otherwise read compiled contract data from the registry
            interfaces = self.__sol_compiler.compile()
            __raw_contract_cache = interfaces
        else:
            __raw_contract_cache = NO_COMPILATION_PERFORMED
        self.__raw_contract_cache = __raw_contract_cache

        # Auto-connect
        self.autoconnect = auto_connect
        if self.autoconnect is True:
            self.connect()

    def __repr__(self):
        r = '{name}({uri})'.format(name=self.__class__.__name__,
                                   uri=self.provider_uri)
        return r

    def connect(self):
        self.log.info("Connecting to {}".format(self.provider_uri))

        if self.__provider is NO_BLOCKCHAIN_CONNECTION:
            raise self.NoProvider(
                "There are no configured blockchain providers")

        # Connect
        web3_instance = Web3(provider=self.__provider)  # Instantiate Web3 object with provider
        self.w3 = web3_instance

        # Check connection
        if not self.is_connected:
            raise self.ConnectionFailed(
                'Failed to connect to provider: {}'.format(self.__provider))

        self.log.info('Successfully Connected to {}'.format(self.provider_uri))
        return self.is_connected

    @property
    def provider(self) -> Union[IPCProvider, WebsocketProvider, HTTPProvider]:
        return self.__provider

    @property
    def is_connected(self) -> bool:
        """
        https://web3py.readthedocs.io/en/stable/providers.html#examples-using-automated-detection
        """
        return self.w3.isConnected()

    @property
    def node_version(self) -> str:
        """Return node version information"""
        return self.w3.node_version.node

    def add_provider(self,
                     provider: Union[IPCProvider, WebsocketProvider,
                                     HTTPProvider] = None,
                     provider_uri: str = None,
                     timeout: int = None) -> None:

        if not provider_uri and not provider:
            raise self.NoProvider("No URI or provider instances supplied.")

        if provider_uri and not provider:
            uri_breakdown = urlparse(provider_uri)

            # PyEVM
            if uri_breakdown.scheme == 'tester':

                if uri_breakdown.netloc == 'pyevm':
                    genesis_params = PyEVMBackend._generate_genesis_params(
                        overrides={'gas_limit': NUCYPHER_GAS_LIMIT})
                    pyevm_backend = PyEVMBackend(
                        genesis_parameters=genesis_params)
                    eth_tester = EthereumTester(backend=pyevm_backend,
                                                auto_mine_transactions=True)
                    provider = EthereumTesterProvider(
                        ethereum_tester=eth_tester)

                elif uri_breakdown.netloc == 'geth':
                    # Hardcoded gethdev IPC provider
                    provider = IPCProvider(ipc_path='/tmp/geth.ipc',
                                           timeout=timeout)

                else:
                    raise ValueError(
                        "{} is an invalid or unsupported blockchain provider URI"
                        .format(provider_uri))

            # IPC
            elif uri_breakdown.scheme == 'ipc':
                provider = IPCProvider(ipc_path=uri_breakdown.path,
                                       timeout=timeout)

            # Websocket
            elif uri_breakdown.scheme == 'ws':
                provider = WebsocketProvider(endpoint_uri=provider_uri)

            # HTTP
            elif uri_breakdown.scheme in ('http', 'https'):
                provider = HTTPProvider(endpoint_uri=provider_uri)

            else:
                raise self.InterfaceError(
                    "'{}' is not a blockchain provider protocol".format(
                        uri_breakdown.scheme))

            self.__provider = provider

    def get_contract_factory(self, contract_name: str) -> Contract:
        """Retrieve compiled interface data from the cache and return web3 contract"""
        try:
            interface = self.__raw_contract_cache[contract_name]
        except KeyError:
            raise self.UnknownContract(
                '{} is not a locally compiled contract.'.format(contract_name))
        except TypeError:
            if self.__raw_contract_cache is NO_COMPILATION_PERFORMED:
                message = "The local contract compiler cache is empty because no compilation was performed."
                raise self.InterfaceError(message)
            raise
        else:
            contract = self.w3.eth.contract(abi=interface['abi'],
                                            bytecode=interface['bin'],
                                            ContractFactoryClass=Contract)
            return contract

    def _wrap_contract(self,
                       wrapper_contract: Contract,
                       target_contract: Contract,
                       factory=Contract) -> Contract:
        """
        Used for upgradeable contracts;
        Returns a new contract object assembled with the address of one contract but the ABI of another.
        """

        # Wrap the contract
        wrapped_contract = self.w3.eth.contract(
            abi=target_contract.abi,
            address=wrapper_contract.address,
            ContractFactoryClass=factory)
        return wrapped_contract

    def get_contract_by_address(self, address: str):
        """Read a single contract's data from the registrar and return it."""
        try:
            contract_records = self.registry.search(contract_address=address)
        except RuntimeError:
            raise self.InterfaceError(
                'Corrupted Registrar')  # TODO: Integrate with Registry
        else:
            if not contract_records:
                raise self.UnknownContract(
                    "No such contract with address {}".format(address))
            return contract_records[0]

    def get_contract_by_name(self,
                             name: str,
                             proxy_name: str = None,
                             use_proxy_address: bool = True,
                             factory: Contract = Contract) -> Contract:
        """
        Instantiate a deployed contract from registry data,
        and assimilate it with its proxy if it is upgradeable.
        """

        target_contract_records = self.registry.search(contract_name=name)

        if not target_contract_records:
            raise self.UnknownContract(
                "No such contract records with name {}".format(name))

        if proxy_name:  # It's upgradeable
            # Lookup proxies; Search for a published proxy that targets this contract record

            proxy_records = self.registry.search(contract_name=proxy_name)

            unified_pairs = list()
            for proxy_name, proxy_addr, proxy_abi in proxy_records:
                proxy_contract = self.w3.eth.contract(
                    abi=proxy_abi,
                    address=proxy_addr,
                    ContractFactoryClass=factory)

                # Read this dispatcher's target address from the blockchain
                proxy_live_target_address = proxy_contract.functions.target().call()
                for target_name, target_addr, target_abi in target_contract_records:

                    if target_addr == proxy_live_target_address:
                        if use_proxy_address:
                            pair = (proxy_addr, target_abi)
                        else:
                            pair = (proxy_live_target_address, target_abi)
                    else:
                        continue

                    unified_pairs.append(pair)

            if len(unified_pairs) > 1:
                address, abi = unified_pairs[0]
                message = "Multiple {} deployments are targeting {}".format(
                    proxy_name, address)
                raise self.InterfaceError(message)

            else:
                selected_address, selected_abi = unified_pairs[0]

        else:  # It's not upgradeable
            if len(target_contract_records) != 1:
                m = "Multiple records registered for non-upgradeable contract {}"
                raise self.InterfaceError(m.format(name))
            _target_contract_name, selected_address, selected_abi = target_contract_records[0]

        # Create the contract from selected sources
        unified_contract = self.w3.eth.contract(abi=selected_abi,
                                                address=selected_address,
                                                ContractFactoryClass=factory)

        return unified_contract

    def call_backend_sign(self, account: str, message: bytes) -> str:
        """
        Calls the appropriate signing function for the specified account on the
        backend. If the backend is based on eth-tester, then it uses the
        eth-tester signing interface to do so.
        """
        provider = self.provider
        if isinstance(provider, EthereumTesterProvider):
            address = to_canonical_address(account)
            sig_key = provider.ethereum_tester.backend._key_lookup[address]
            signed_message = sig_key.sign_msg(message)
            return signed_message
        else:
            return self.w3.eth.sign(
                account, data=message)  # TODO: Technically deprecated...

    def call_backend_verify(self, pubkey: PublicKey, signature: Signature,
                            msg_hash: bytes):
        """
        Verifies a hex string signature and message hash are from the provided
        public key.
        """
        is_valid_sig = signature.verify_msg_hash(msg_hash, pubkey)
        sig_pubkey = signature.recover_public_key_from_msg_hash(msg_hash)

        return is_valid_sig and (sig_pubkey == pubkey)

    def unlock_account(self, address, password, duration):
        if 'tester' in self.provider_uri:
            return True  # Test accounts are unlocked by default.
        return self.w3.personal.unlockAccount(address, password, duration)
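
A short sketch of the older interface above; the 'tester://pyevm' URI scheme
comes straight from add_provider(), and auto_connect defaults to True, so
construction alone attaches the PyEVM tester backend and connects:

interface = BlockchainInterface(provider_uri='tester://pyevm')
print(interface.is_connected)   # True once the EthereumTesterProvider responds
print(repr(interface))          # BlockchainInterface(tester://pyevm)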
Exemple #36
0
class TesterBlockchain(BlockchainDeployerInterface):
    """
    Blockchain subclass with additional test utility methods and options.
    """

    _instance = None

    _PROVIDER_URI = 'tester://pyevm'
    TEST_CONTRACTS_DIR = os.path.join(BASE_DIR, 'tests', 'blockchain', 'eth',
                                      'contracts', 'contracts')
    _compiler = SolidityCompiler(
        source_dirs=[(SolidityCompiler.default_contract_dir(),
                      {TEST_CONTRACTS_DIR})])
    _test_account_cache = list()

    _default_test_accounts = NUMBER_OF_ETH_TEST_ACCOUNTS

    # Reserved addresses
    _ETHERBASE = 0
    _ALICE = 1
    _BOB = 2
    _FIRST_STAKER = 5
    _stakers_range = range(NUMBER_OF_STAKERS_IN_BLOCKCHAIN_TESTS)
    _FIRST_URSULA = _FIRST_STAKER + NUMBER_OF_STAKERS_IN_BLOCKCHAIN_TESTS
    _ursulas_range = range(NUMBER_OF_URSULAS_IN_BLOCKCHAIN_TESTS)

    _default_token_economics = StandardTokenEconomics()

    def __init__(self,
                 test_accounts=None,
                 poa=True,
                 light=False,
                 eth_airdrop=False,
                 free_transactions=False,
                 compiler: SolidityCompiler = None,
                 *args,
                 **kwargs):

        if not test_accounts:
            test_accounts = self._default_test_accounts
        self.free_transactions = free_transactions

        if compiler:
            TesterBlockchain._compiler = compiler

        super().__init__(provider_uri=self._PROVIDER_URI,
                         provider_process=None,
                         poa=poa,
                         light=light,
                         compiler=self._compiler,
                         *args,
                         **kwargs)

        self.log = Logger("test-blockchain")
        self.connect()

        # Generate additional ethereum accounts for testing
        population = test_accounts
        enough_accounts = len(self.client.accounts) >= population
        if not enough_accounts:
            accounts_to_make = population - len(self.client.accounts)
            self.__generate_insecure_unlocked_accounts(
                quantity=accounts_to_make)
            assert test_accounts == len(self.w3.eth.accounts)

        if eth_airdrop is True:  # ETH for everyone!
            self.ether_airdrop(amount=DEVELOPMENT_ETH_AIRDROP_AMOUNT)

    @staticmethod
    def free_gas_price_strategy(w3, transaction_params=None):
        return 0

    def attach_middleware(self):
        super().attach_middleware()
        if self.free_transactions:
            self.w3.eth.setGasPriceStrategy(self.free_gas_price_strategy)

    def __generate_insecure_unlocked_accounts(self,
                                              quantity: int) -> List[str]:

        #
        # Sanity Check - Only PyEVM can be used.
        #

        # Detect provider platform
        client_version = self.w3.clientVersion

        if 'Geth' in client_version:
            raise RuntimeError("WARNING: Geth providers are not implemented.")
        elif "Parity" in client_version:
            raise RuntimeError(
                "WARNING: Parity providers are not implemented.")

        addresses = list()
        for _ in range(quantity):
            address = self.provider.ethereum_tester.add_account(
                '0x' + os.urandom(32).hex())
            addresses.append(address)
            self._test_account_cache.append(address)
            self.log.info('Generated new insecure account {}'.format(address))
        return addresses

    def ether_airdrop(self, amount: int) -> List[str]:
        """Airdrops ether from creator address to all other addresses!"""

        coinbase, *addresses = self.w3.eth.accounts

        tx_hashes = list()
        for address in addresses:
            tx = {'to': address, 'from': coinbase, 'value': amount}
            txhash = self.w3.eth.sendTransaction(tx)

            _receipt = self.wait_for_receipt(txhash)
            tx_hashes.append(txhash)
            eth_amount = Web3().fromWei(amount, 'ether')
            self.log.info("Airdropped {} ETH {} -> {}".format(
                eth_amount, tx['from'], tx['to']))

        return tx_hashes

    def time_travel(self,
                    hours: int = None,
                    seconds: int = None,
                    periods: int = None):
        """
        Wait the specified number of wait_hours by comparing
        block timestamps and mines a single block.
        """

        more_than_one_arg = sum(map(bool, (hours, seconds, periods))) > 1
        if more_than_one_arg:
            raise ValueError(
                "Specify hours, seconds, or periods, not a combination")

        if periods:
            duration = self._default_token_economics.seconds_per_period * periods
            base = self._default_token_economics.seconds_per_period
        elif hours:
            duration = hours * (60 * 60)
            base = 60 * 60
        elif seconds:
            duration = seconds
            base = 1
        else:
            raise ValueError("Specify either hours, seconds, or periods.")

        now = self.w3.eth.getBlock(block_identifier='latest').timestamp
        end_timestamp = ((now + duration) // base) * base

        self.w3.eth.web3.testing.timeTravel(timestamp=end_timestamp)
        self.w3.eth.web3.testing.mine(1)

        delta = maya.timedelta(seconds=end_timestamp - now)
        self.log.info(
            f"Time traveled {delta} "
            f"| period {epoch_to_period(epoch=end_timestamp, seconds_per_period=self._default_token_economics.seconds_per_period)} "
            f"| epoch {end_timestamp}")

    @classmethod
    def bootstrap_network(
        cls,
        economics: TokenEconomics = None
    ) -> Tuple['TesterBlockchain', 'InMemoryContractRegistry']:
        """For use with metric testing scripts"""

        registry = InMemoryContractRegistry()
        testerchain = cls(compiler=SolidityCompiler())
        BlockchainInterfaceFactory.register_interface(testerchain)
        power = TransactingPower(password=INSECURE_DEVELOPMENT_PASSWORD,
                                 account=testerchain.etherbase_account)
        power.activate()
        testerchain.transacting_power = power

        origin = testerchain.client.etherbase
        deployer = ContractAdministrator(deployer_address=origin,
                                         registry=registry,
                                         economics=economics
                                         or cls._default_token_economics,
                                         staking_escrow_test_mode=True)
        secrets = dict()
        for deployer_class in deployer.upgradeable_deployer_classes:
            secrets[deployer_class.contract_name] = INSECURE_DEVELOPMENT_PASSWORD
        _receipts = deployer.deploy_network_contracts(secrets=secrets,
                                                      interactive=False)
        return testerchain, registry

    @property
    def etherbase_account(self):
        return self.client.accounts[self._ETHERBASE]

    @property
    def alice_account(self):
        return self.client.accounts[self._ALICE]

    @property
    def bob_account(self):
        return self.client.accounts[self._BOB]

    def ursula_account(self, index):
        if index not in self._ursulas_range:
            raise ValueError(
                f"Ursula index must be lower than {NUMBER_OF_URSULAS_IN_BLOCKCHAIN_TESTS}"
            )
        return self.client.accounts[index + self._FIRST_URSULA]

    def staker_account(self, index):
        if index not in self._stakers_range:
            raise ValueError(
                f"Staker index must be lower than {NUMBER_OF_STAKERS_IN_BLOCKCHAIN_TESTS}"
            )
        return self.client.accounts[index + self._FIRST_STAKER]

    @property
    def ursulas_accounts(self):
        return list(self.ursula_account(i) for i in self._ursulas_range)

    @property
    def stakers_accounts(self):
        return list(self.staker_account(i) for i in self._stakers_range)

    @property
    def unassigned_accounts(self):
        special_accounts = [
            self.etherbase_account, self.alice_account, self.bob_account
        ]
        assigned_accounts = set(self.stakers_accounts + self.ursulas_accounts +
                                special_accounts)
        accounts = set(self.client.accounts)
        return list(accounts.difference(assigned_accounts))

    def wait_for_receipt(self, txhash: bytes, timeout: int = None) -> dict:
        """Wait for a transaction receipt and return it"""
        timeout = timeout or self.TIMEOUT
        result = self.w3.eth.waitForTransactionReceipt(txhash, timeout=timeout)
        if result.status == 0:
            raise TransactionFailed()
        return result
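
A brief sketch of the test utilities above: bootstrap_network() deploys the
contracts on an in-memory PyEVM chain, and time_travel() rounds the new block
timestamp down to the nearest hour/period boundary before mining one block:

testerchain, registry = TesterBlockchain.bootstrap_network()
before = testerchain.w3.eth.getBlock('latest').timestamp
testerchain.time_travel(hours=1)
after = testerchain.w3.eth.getBlock('latest').timestamp
assert after > before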
Exemple #37
0
class WorkTracker:

    CLOCK = reactor
    REFRESH_RATE = 60 * 15  # Fifteen minutes

    def __init__(self, worker, refresh_rate: int = None, *args, **kwargs):

        super().__init__(*args, **kwargs)
        self.log = Logger('stake-tracker')

        self.worker = worker
        self.staking_agent = self.worker.staking_agent

        self._refresh_rate = refresh_rate or self.REFRESH_RATE
        self._tracking_task = task.LoopingCall(self._do_work)
        self._tracking_task.clock = self.CLOCK

        self.__current_period = None
        self.__start_time = NOT_STAKING
        self.__uptime_period = NOT_STAKING
        self._abort_on_error = True

    @property
    def current_period(self):
        return self.__current_period

    def stop(self) -> None:
        self._tracking_task.stop()
        self.log.info(f"STOPPED WORK TRACKING")

    def start(self, act_now: bool = False, force: bool = False) -> None:
        """
        High-level stake tracking initialization, this function aims
        to be safely called at any time - For example, it is okay to call
        this function multiple times within the same period.
        """
        if self._tracking_task.running and not force:
            return

        # Record the start time and period
        self.__start_time = maya.now()
        self.__uptime_period = self.staking_agent.get_current_period()
        self.__current_period = self.__uptime_period

        d = self._tracking_task.start(interval=self._refresh_rate)
        d.addErrback(self.handle_working_errors)
        self.log.info(f"STARTED WORK TRACKING")

        if act_now:
            self._do_work()

    def _crash_gracefully(self, failure=None) -> None:
        """
        A facility for crashing more gracefully in the event that
        an exception is unhandled in a different thread.
        """
        self._crashed = failure
        failure.raiseException()

    def handle_working_errors(self, *args, **kwargs) -> None:
        failure = args[0]
        if self._abort_on_error:
            self.log.critical(f"Unhandled error during node work tracking. {failure}")
            reactor.callFromThread(self._crash_gracefully, failure=failure)
        else:
            self.log.warn(f"Unhandled error during work tracking: {failure.getTraceback()}")

    def _do_work(self) -> None:
        # TODO: #1515 Shut down at end of terminal stake

        # Update on-chain status
        self.log.info(f"Checking for new period. Current period is {self.__current_period}")
        onchain_period = self.staking_agent.get_current_period()  # <-- read from contract
        if self.current_period != onchain_period:
            self.__current_period = onchain_period
            # self.worker.stakes.refresh()  # TODO: #1517 Move this a better location

        # Measure working interval
        interval = onchain_period - self.worker.last_active_period
        if interval < 0:
            return  # No need to confirm this period.  Save the gas.
        if interval > 0:
            # TODO: #1516 Follow-up actions for downtime
            self.log.warn(f"MISSED CONFIRMATIONS - {interval} missed staking confirmations detected.")

        # Confirm Activity
        self.log.info("Confirmed activity for period {}".format(self.current_period))
        transacting_power = self.worker.transacting_power
        with transacting_power:
            self.worker.confirm_activity()  # <-- blockchain WRITE
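
A minimal wiring sketch for the tracker above. The worker can be any object
exposing what the tracker actually touches (staking_agent, last_active_period,
transacting_power and confirm_activity); 'ursula' here is an assumed stand-in:

tracker = WorkTracker(worker=ursula)
tracker.start()   # idempotent; safe to call again within the same period
# ... on shutdown:
tracker.stop()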
Exemple #38
0
class BaseContractRegistry(ABC):
    """
    Records known contracts on the disk for future access and utility. This
    lazily writes to the filesystem during contract enrollment.

    WARNING: Unless you are developing NuCypher, you most likely won't ever need
    to use this.
    """

    logger = Logger('ContractRegistry')

    _multi_contract = True
    _contract_name = NotImplemented

    # Registry
    REGISTRY_NAME = 'contract_registry.json'  # TODO: Save registry with ID-time-based filename
    DEVELOPMENT_REGISTRY_NAME = 'dev_contract_registry.json'

    _PUBLICATION_USER = "******"
    _PUBLICATION_REPO = f"{_PUBLICATION_USER}/ethereum-contract-registry"
    _PUBLICATION_BRANCH = 'goerli'  # TODO: Allow other branches to be used

    class RegistryError(Exception):
        pass

    class RegistrySourceUnavailable(RegistryError):
        pass

    class EmptyRegistry(RegistryError):
        pass

    class NoRegistry(RegistryError):
        pass

    class UnknownContract(RegistryError):
        pass

    class InvalidRegistry(RegistryError):
        """Raised when invalid data is encountered in the registry"""

    def __init__(self, *args, **kwargs):
        self.log = Logger("registry")

    def __eq__(self, other) -> bool:
        if self is other:
            return True  # and that's all
        return bool(self.id == other.id)

    def __repr__(self) -> str:
        r = f"{self.__class__.__name__}(id={self.id[:6]})"
        return r

    @property
    def id(self) -> str:
        """Returns a hexstr of the registry contents."""
        blake = hashlib.blake2b()
        blake.update(json.dumps(self.read()).encode())
        digest = blake.digest().hex()
        return digest

    @abstractmethod
    def _destroy(self) -> None:
        raise NotImplementedError

    @abstractmethod
    def write(self, registry_data: list) -> None:
        raise NotImplementedError

    @abstractmethod
    def read(self) -> Union[list, dict]:
        raise NotImplementedError

    @classmethod
    def get_publication_endpoint(cls) -> str:
        url = f'https://raw.githubusercontent.com/{cls._PUBLICATION_REPO}/{cls._PUBLICATION_BRANCH}/{cls.REGISTRY_NAME}'
        return url

    @classmethod
    def fetch_latest_publication(cls) -> bytes:
        # Setup
        publication_endpoint = cls.get_publication_endpoint()
        cls.logger.debug(
            f"Downloading contract registry from {publication_endpoint}")
        response = requests.get(publication_endpoint)

        # Fetch
        if response.status_code != 200:
            error = f"Failed to fetch registry from {publication_endpoint} with status code {response.status_code}"
            raise cls.RegistrySourceUnavailable(error)

        registry_data = response.content
        return registry_data

    @classmethod
    def from_latest_publication(cls, *args,
                                **kwargs) -> 'BaseContractRegistry':
        """
        Get the latest published contract registry from github and save it on the local file system.
        """
        registry_data_bytes = cls.fetch_latest_publication()
        instance = cls(*args, **kwargs)
        instance.write(registry_data=json.loads(registry_data_bytes))
        return instance

    @property
    def enrolled_names(self) -> Iterator:
        entries = iter(record[0] for record in self.read())
        return entries

    @property
    def enrolled_addresses(self) -> Iterator:
        entries = iter(record[1] for record in self.read())
        return entries

    def enroll(self, contract_name, contract_address, contract_abi) -> None:
        """
        Enrolls a contract to the chain registry by writing the name, address,
        and abi information to the filesystem as JSON.

        Note: Unless you are developing NuCypher, you most likely won't ever
        need to use this.
        """
        contract_data = [contract_name, contract_address, contract_abi]
        try:
            registry_data = self.read()
        except self.RegistryError:
            self.log.info("Blank registry encountered: enrolling {}:{}".format(
                contract_name, contract_address))
            registry_data = list()  # empty registry

        registry_data.append(contract_data)
        self.write(registry_data)
        self.log.info("Enrolled {}:{} into registry.".format(
            contract_name, contract_address))

    def search(self,
               contract_name: str = None,
               contract_address: str = None) -> tuple:
        """
        Searches the registry for a contract with the provided name or address
        and returns the contract's component data.
        """
        if not (bool(contract_name) ^ bool(contract_address)):
            raise ValueError(
                "Pass contract_name or contract_address, not both.")

        contracts = list()
        registry_data = self.read()

        try:
            for name, addr, abi in registry_data:
                if contract_name == name or contract_address == addr:
                    contracts.append((name, addr, abi))
        except ValueError:
            message = "Missing or corrupted registry data"
            self.log.critical(message)
            raise self.InvalidRegistry(message)

        if not contracts:
            raise self.UnknownContract(contract_name)

        if contract_address and len(contracts) > 1:
            m = f"Multiple records returned for address {contract_address}"
            self.log.critical(m)
            raise self.InvalidRegistry(m)

        result = tuple(contracts) if contract_name else contracts[0]
        return result
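
BaseContractRegistry is abstract; a minimal in-memory subclass (a sketch, not
part of the source) only needs read(), write() and _destroy(), after which the
enroll() and search() methods above work unchanged:

class DictRegistry(BaseContractRegistry):
    """Toy registry keeping records in a plain list."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._data = list()

    def _destroy(self) -> None:
        self._data = list()

    def write(self, registry_data: list) -> None:
        self._data = registry_data

    def read(self) -> list:
        return self._data

registry = DictRegistry()
registry.enroll('NuCypherToken', '0x' + '11' * 20, [])  # illustrative name/address
(record,) = registry.search(contract_name='NuCypherToken')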
Exemple #39
0
class TimeSchedule:

    def __init__(self, lock, host='127.0.0.1', port='6800'):
        config = Config()
        self.db = glv.get_value(key='sqlite_db')
        self.user_name = config.get('auth_username', '')
        self.user_password = config.get('auth_password', '')
        self.start_time = time.strftime("%Y %m %d %H %M %S", time.localtime())
        self.server_port = 'http://{}:{}/'.format(host, port)
        self.schedule_post_url = 'http://{}:{}/schedule.json'.format(host, port)
        self.listproject_url = 'http://{}:{}/listprojects.json'.format(host, port)
        self.spider_task_dic = dict()
        self.projects = None
        self.db_lock = lock
        self.ts_lock = threading.Lock()
        self._keys_set = {
            "year",
            "month",
            "day",
            "week",
            "hour",
            "minute",
            "second",
            "y",
            "m",
            "d",
            "w",
            "H",
            "M",
            "S",
        }
        self._keys_dic = {
            "y": "year",
            "m": "month",
            "d": "day",
            "w": "week",
            "H": "hour",
            "M": "minute",
            "S": "second",
        }
        self._keys_set_lis = [[y for y in x] for x in self._keys_set]
        self.CPU_THRESHOLD = 93
        self.MEMORY_THRESHOLD = 96
        self.schedule_logger = Logger(namespace='- Scheduler -')

    def run(self):
        time.sleep(3)
        self.projects = self.list_projects()
        self.schedule_logger.info('scheduler is running')
        count = 1
        while True:
            schedule_sta = self.task_scheduler()
            if not schedule_sta and count == 1:
                self.schedule_logger.info('No Scheduled Spider in Database')
                count += 1
            elif not schedule_sta and count != 1:
                count += 1
            else:
                count = 1
            time.sleep(1)

    def task_scheduler(self):
        self.ts_lock.acquire(blocking=True)
        self.db_lock.acquire()
        db_result = self.db.get(model_name='SpiderScheduleModel',
                                key_list=['hash_str', 'project', 'spider', 'schedule', 'args', 'runtime', 'status'])
        self.db_lock.release()
        self.ts_lock.release()
        schedule_list_raw = [
            {'hash_str': x.hash_str, 'project': x.project, 'spider': x.spider, 'schedule': x.schedule, 'args': x.args, 'runtime': x.runtime,
             'status': x.status}
            for x in db_result if int(x.status) != 0
        ] if db_result else []
        schedule_sta = False
        if schedule_list_raw:
            for each_schedule in schedule_list_raw:
                project = each_schedule.get('project')
                runtime = int(each_schedule.get('runtime'))
                if project in self.projects and runtime > 0:
                    schedule = each_schedule.get('schedule')

                    if any([x in schedule for x in self._keys_set]):
                        try:
                            schedule = json.loads(schedule)
                        except:
                            schedule = eval(schedule)
                    try:
                        if isinstance(schedule, dict):
                            for key in schedule.keys():
                                if key not in self._keys_set:
                                    mean_key = self._check_key(key)
                                    raise ValueError(
                                        'found "{}" in your schedule dict, maybe you mean "{}"'.format(key, mean_key))
                                if key in self._keys_dic:
                                    val = schedule.pop(key)
                                    schedule[self._keys_dic[key]] = val
                            next_time_sep = self.cal_time_sep(**schedule)
                        else:
                            next_time_sep = self.cal_time_sep(schedule_str=schedule, is_str=True)
                        next_time_sep = int(next_time_sep) + 1
                        if next_time_sep > 1:
                            each_schedule['schedule'] = next_time_sep
                            item = '{}-{}'.format(each_schedule['project'], each_schedule['spider'])
                            self.ts_lock.acquire(blocking=True)
                            if self.spider_task_dic.get(item) != 'waiting':
                                self.spider_task_dic[item] = 'waiting'
                                t = threading.Thread(target=self.poster, args=(each_schedule,))
                                try:
                                    t.start()
                                except Exception as THError:
                                    self.schedule_logger.warn('start new job error [ {} ]: {}'.format(item, THError))
                            self.ts_lock.release()
                    except ValueError as V:
                        self.schedule_logger.error('spider runtime schedule error, please check the database: {}'.format(V))
            schedule_sta = True
        return schedule_sta
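
    # Illustrative 'schedule' values accepted above (inferred from cal_time_sep):
    #   {"hour": "*/2"}          -> two hours from now ('*/n' means current + n)
    #   {"H": "*/2", "M": "30"}  -> short keys are expanded via self._keys_dic
    #   "2020, 1, 1, 0, 0, 0"    -> absolute datetime string (the is_str branch)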

    def poster(self, dic):
        hash_str = dic.get('hash_str')
        status = int(dic.pop('status'))
        project = dic.get('project')
        spider = dic.get('spider')
        job_str = " %s-%s " % (project, spider)
        args = dic.get('args')
        try:
            args = json.loads(args)
        except:
            args = eval(args)
        wait_time = dic.get('schedule')
        item = '{}-{}'.format(project, spider)
        if project and spider:
            data = {'project': project, 'spider': spider, 'un': self.user_name, 'pwd': self.user_password}
            if args:
                args = self._spider_args_method(args, hash_str)
                data.update(args)
            self.schedule_logger.info('job {} is waiting, countdown {}s'.format(item, wait_time))
            time.sleep(wait_time - 1)
            another_wait_time = 0
            spider_runtime_avg = self.spiders_runtime(project=project, spider=spider)
            if status == 1:
                while not self.is_system_ok():
                    self.schedule_logger.warn('system is under heavy load, waiting another 2 seconds to post schedule')
                    time.sleep(2)
                    another_wait_time += 3
                    if another_wait_time >= (wait_time - spider_runtime_avg):
                        self.schedule_logger.warning('waited too long, cancelling the job %s' % job_str)
                        return None
            # statuses 2, 3 and anything else currently post the same request
            res = json.loads(requests.post(url=self.schedule_post_url, data=data).content)
            spider_status = res.get('status')
            if spider_status != 'ok':
                spider_status = 'error'
        else:
            self.schedule_logger.error('job project: {}, spider: {} post fail!'.format(project, spider))
            spider_status = 'error'
        self.ts_lock.acquire(blocking=True)
        if spider_status == 'ok':
            self._run_countdown(project=project, spider=spider)
        self.spider_task_dic[item] = spider_status
        self.ts_lock.release()

    def _spider_args_method(self, args, hash_str):
        args_raw = args.copy()
        if args:
            method = args.pop('method', 'normal')
            next_args = args  # default; also the fallback if a method dict sets nothing
            if method == 'auto_increment':
                next_args = {k: str(int(v)+1) if isinstance(v, int) or (isinstance(v, str) and v.isdigit()) else v for k, v in args.items()}
            elif isinstance(method, dict):
                ex_md = method.get('expression')
                fc_md = method.get('function')
                if ex_md:
                    next_args = eval(ex_md)
                if fc_md:
                    exec(fc_md)
            next_args.update({'method': method})
            self.db.update('SpiderScheduleModel', update_dic={'args': next_args}, filter_dic={"hash_str": hash_str})
            return args
        return args_raw

    def spiders_runtime(self, project, spider):
        """Average historic runtime (in seconds) of a spider; feeds poster()'s wait budget."""
        self.db_lock.acquire()
        res = self.db.get(model_name='SpiderMonitor', key_list=['runtime'],
                          filter_dic={'project': project, 'spider': spider})
        self.db_lock.release()
        spider_list = [int(x.runtime) for x in res if x.runtime.isdigit()] if res else []
        # Guard against an empty list (no records, or no numeric runtimes).
        return sum(spider_list) / len(spider_list) if spider_list else 0

    def list_projects(self):
        res = requests.get(url=self.listproject_url)
        projects = set()
        if res:
            projects_list = json.loads(res.content).get('projects')
            if projects_list:
                projects = set(projects_list)
        return projects

    def cal_time_sep(self,
                     year='*',
                     month='*',
                     day='*',
                     week='*',
                     hour='*',
                     minute='*',
                     second='*',
                     schedule_str=None,
                     is_str=False
                     ):
        """
            "%Y-%m-%d %H:%M:%S %w"

        """
        if is_str:
            s = [int(x.strip()) for x in schedule_str.split(',')]
            time_sep = (datetime.datetime(s[0], s[1], s[2], s[3], s[4], s[5]) - datetime.datetime.now()).total_seconds()
            return time_sep
        y = int(time.strftime("%Y", time.localtime()))
        if year != '*' and '*' in year:
            y = int(year.split('/')[-1]) + y
        elif year.isdigit():
            y = int(year)

        if week == '*':
            m = int(time.strftime("%m", time.localtime()))
            if month != '*' and '*' in month:
                m_raw = int(month.split('/')[-1])
                if m_raw >= 12:
                    raise ValueError('month value is too large, please set the year instead')
                m = m_raw + m
                if m > 12:
                    y += m // 12
                    m = m % 12
            elif month.isdigit():
                m = int(month)

            days_in_this_month = self.how_many_days_in_this_month(y, m)
            d = int(time.strftime("%d", time.localtime()))
            if day != '*' and '*' in day:
                d_raw = int(day.split('/')[-1])
                if d_raw > days_in_this_month:
                    raise ValueError('day value is too large, please set the month or the year instead')
                d = d_raw + d
                if d > days_in_this_month:
                    d = d - days_in_this_month
                    m += 1
                    if m > 12:
                        y += 1
                        m = m - 12
            elif day.isdigit():
                d = int(day)

            days_in_this_month = self.how_many_days_in_this_month(y, m)
            H = int(time.strftime("%H", time.localtime()))
            if hour != '*' and '*' in hour:
                H_raw = int(hour.split('/')[-1])
                if H_raw > 24:
                    raise ValueError('hour value is too large, please set the day instead')
                H = H_raw + H
                if H >= 24:
                    H = H - 24
                    d += 1
                    if d > days_in_this_month:
                        d = d - days_in_this_month
                        m += 1
                        if m > 12:
                            y += 1
                            m = m - 12
            elif hour.isdigit():
                H = int(hour)

            days_in_this_month = self.how_many_days_in_this_month(y, m)
            M = int(time.strftime("%M", time.localtime()))
            if minute != '*' and '*' in minute:
                M_raw = int(minute.split('/')[-1])
                if M_raw > 60:
                    raise ValueError('minute value is too large, please set the hour instead')
                M = M_raw + M
                if M >= 60:
                    M = M - 60
                    H += 1
                    if H >= 24:
                        H = H - 24
                        d += 1
                        if d > days_in_this_month:
                            d = d - days_in_this_month
                            m += 1
                            if m > 12:
                                y += 1
                                m = m - 12
            elif minute.isdigit():
                M = int(minute)

            days_in_this_month = self.how_many_days_in_this_month(y, m)
            S = int(time.strftime("%S", time.localtime()))
            if second != '*' and '*' in second:
                S_raw = int(second.split('/')[-1])
                if S_raw > 60:
                    raise ValueError('second value is too large, please set the minute instead')
                S = S_raw + S
                if S >= 60:
                    S = S - 60
                    M += 1
                    if M >= 60:
                        M = M - 60
                        H += 1
                        if H >= 24:
                            H = H - 24
                            d += 1
                            if d > days_in_this_month:
                                d = d - days_in_this_month
                                m += 1
                                if m > 12:
                                    y += 1
                                    m = m - 12
            elif second.isdigit():
                S = int(second)
            time_sep = (datetime.datetime(y, m, d, H, M, S) - datetime.datetime.now()).total_seconds()

        else:
            week_in_this_year = int(time.strftime("%U", time.localtime()))
            w = int(time.strftime("%w", time.localtime()))
            if '*' in week:
                w_raw = int(week.split('/')[-1])
                if w_raw >= 7:
                    raise ValueError('week value is too large, please set the day or the month instead')
                if w_raw < w:
                    week_in_this_year += 1
                w = w_raw
                if week_in_this_year > 53:
                    y += 1
                    week_in_this_year = week_in_this_year - 53

            elif week.isdigit():
                if int(week) < w:
                    # The target weekday already passed this week; roll over.
                    week_in_this_year += 1
                w = int(week)

            H = int(time.strftime("%H", time.localtime()))
            if hour != '*' and '*' in hour:
                H_raw = int(hour.split('/')[-1])
                if H_raw >= 24:
                    raise ValueError('hour value is too large, please set the day instead')
                H = H_raw + H
                if H >= 24:
                    H = H - 24
                    w += 1
                    if w >= 7:
                        w = w - 7
                        week_in_this_year += 1
                        if week_in_this_year > 53:
                            y += 1
                            week_in_this_year = week_in_this_year - 53
            elif hour.isdigit():
                H = int(hour)

            M = int(time.strftime("%M", time.localtime()))
            if minute != '*' and '*' in minute:
                M_raw = int(minute.split('/')[-1])
                if M_raw >= 60:
                    raise ValueError('minute value is too large, please set the hour instead')
                M = M_raw + M
                if M >= 60:
                    M = M - 60
                    H += 1
                    if H >= 24:
                        H = H - 24
                        w += 1
                        if w > 7:
                            w = w - 7
                            week_in_this_year += 1
                            if week_in_this_year > 53:
                                y += 1
                                week_in_this_year = week_in_this_year - 53
            elif minute.isdigit():
                M = int(minute)

            S = int(time.strftime("%S", time.localtime()))
            if second != '*' and '*' in second:
                S_raw = int(second.split('/')[-1])
                if S_raw >= 60:
                    raise ValueError('second value is too large, please set the minute instead')
                S = S_raw + S
                if S >= 60:
                    S = S - 60
                    M += 1
                    if M >= 60:
                        M = M - 60
                        H += 1
                        if H >= 24:
                            H = H - 24
                            w += 1
                            if w > 7:
                                w = w - 7
                                week_in_this_year += 1
                                if week_in_this_year > 53:
                                    y += 1
                                    week_in_this_year = week_in_this_year - 53
            elif second.isdigit():
                S = int(second)
            if S >= 60:
                S = S - 60
                M += 1
                if M >= 60:
                    M = M - 60
                    H += 1
                    if H >= 24:
                        H = H - 24
                        w += 1
                        if w > 7:
                            w = w - 7
                            week_in_this_year += 1
                            if week_in_this_year > 53:
                                y += 1
                                week_in_this_year = week_in_this_year - 53
            m, d = self.get_month_and_days_by_week(year=y, week_in_this_year=week_in_this_year, week=w)
            time_sep = (datetime.datetime(y, m, d, H, M, S) - datetime.datetime.now()).total_seconds()

        return time_sep
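    # Usage sketch (illustrative values): cal_time_sep(hour='*/2') returns the
    # seconds until two hours from now at the current minute and second, while
    # cal_time_sep(schedule_str='2030, 1, 1, 0, 0, 0', is_str=True) returns the
    # seconds until that absolute timestamp.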

    def get_month_and_days_by_week(self, year, week_in_this_year, week):
        days = week_in_this_year * 7 + week
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            Fe = 29
        else:
            Fe = 28
        month_lis = [31, Fe, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
        month_count = 1
        days_count = 0
        for month_days in month_lis:
            days = days - month_days
            if days > 0:
                month_count += 1
            elif days == 0:
                days_count = 0
                month_count += 1
                break
            else:
                days_count = days + month_days
                break
        return [month_count, days_count]

    def how_many_days_in_this_month(self, y, m):
        if m in (1, 3, 5, 7, 8, 10, 12):
            days = 31
        elif m in (4, 6, 9, 11):
            days = 30
        else:
            if (y % 4 == 0 and y % 100 != 0) or (y % 400 == 0):
                days = 29
            else:
                days = 28
        return days
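    # Stdlib equivalent for reference (not used by the original code):
    #   import calendar
    #   days = calendar.monthrange(y, m)[1]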

    def is_system_ok(self):
        is_pass = True
        cpu_list = psutil.cpu_percent(interval=1, percpu=True)  # blocks for the 1s sampling interval
        memory_percent = psutil.virtual_memory().percent
        if cpu_list and memory_percent:
            # CPU is only considered busy when every core exceeds the threshold.
            is_cpu_ok = True
            if min(cpu_list) > self.CPU_THRESHOLD:
                is_cpu_ok = False
            is_memo_ok = True
            if memory_percent > self.MEMORY_THRESHOLD:
                is_memo_ok = False
            if not is_cpu_ok or not is_memo_ok:
                is_pass = False
        return is_pass

    def _check_key(self, key):
        key_lis = list(key)
        count_dic = dict()
        # Score each known key by how many characters it shares with the input,
        # then return the highest-scoring candidate.
        for ksl in self._keys_set_lis:
            o_key = ''.join(ksl)
            score = 0
            for k in key_lis:
                if k in ksl:
                    score += 1
            count_dic[o_key] = score
        best_match = sorted(count_dic, key=count_dic.__getitem__, reverse=True)[0]
        return best_match

    def _run_countdown(self, project, spider):
        db_schedule = self.db.get(model_name='SpiderScheduleModel', key_list=['id', 'runtime'],
                                  filter_dic={'project': project, 'spider': spider})
        run_time_in_db = [x.runtime for x in db_schedule][0] if db_schedule else 0
        the_id = [x.id for x in db_schedule][0] if db_schedule else None
        if int(run_time_in_db) > 0 and the_id is not None:
            # Decrement the remaining-runs counter for this schedule entry.
            rt = int(run_time_in_db) - 1
            self.db.update(model_name='SpiderScheduleModel', update_dic={"runtime": rt}, filter_dic={"id": the_id})
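
A quick standalone check of the leap-year rule that get_month_and_days_by_week and how_many_days_in_this_month above both hard-code; calendar.isleap is the stdlib equivalent (an illustrative sketch, not part of the original example):

import calendar

for year in (1900, 2000, 2023, 2024):
    hand_rolled = (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0)
    assert hand_rolled == calendar.isleap(year)  # both rules agree
    print(year, hand_rolled)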
Exemple #40
0
from __future__ import print_function, absolute_import

from twisted.internet import reactor
from twisted.logger import Logger, globalLogPublisher

from cheeselib.logger import PrintingObserver
from cheeselib.server.rpc import CheeseRPCServerFactory, CheeseRPCServer
from cheeselib.server.storage.mongo import MongoDAO

SERVER_PORT = 18080

log = Logger()

globalLogPublisher.addObserver(PrintingObserver())

dao = MongoDAO()

rpc_server = CheeseRPCServer(dao).getStreamFactory(CheeseRPCServerFactory)

reactor.listenTCP(SERVER_PORT, rpc_server)
log.info("Starting server on port %d..." % SERVER_PORT)
reactor.run()
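
For reference, a Twisted log observer is just a callable that accepts an event dict. A minimal sketch of what a PrintingObserver like the one imported above could look like (the real cheeselib implementation is not shown here, so its shape is an assumption):

from twisted.logger import formatEvent

class PrintingObserver(object):
    def __call__(self, event):
        # formatEvent renders the event's log_format string with its keyword values.
        print(formatEvent(event))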
Exemple #41
0
class NodeConfiguration(ABC):
    """
    'Sideways Engagement' of Character classes; a reflection of input parameters.
    """

    # Abstract
    _NAME = NotImplemented
    _CHARACTER_CLASS = NotImplemented
    CONFIG_FILENAME = NotImplemented
    DEFAULT_CONFIG_FILE_LOCATION = NotImplemented

    # Mode
    DEFAULT_OPERATING_MODE = 'decentralized'

    # Domains
    DEFAULT_DOMAIN = GLOBAL_DOMAIN

    # Serializers
    NODE_SERIALIZER = binascii.hexlify
    NODE_DESERIALIZER = binascii.unhexlify

    # System
    __CONFIG_FILE_EXT = '.config'
    __CONFIG_FILE_DESERIALIZER = json.loads
    TEMP_CONFIGURATION_DIR_PREFIX = "nucypher-tmp-"

    # Blockchain
    DEFAULT_PROVIDER_URI = 'tester://pyevm'

    # Registry
    __REGISTRY_NAME = 'contract_registry.json'
    REGISTRY_SOURCE = os.path.join(
        BASE_DIR, __REGISTRY_NAME)  # TODO: #461 Where will this be hosted?

    # Rest + TLS
    DEFAULT_REST_HOST = '127.0.0.1'
    DEFAULT_REST_PORT = 9151
    DEFAULT_DEVELOPMENT_REST_PORT = 10151
    __DEFAULT_TLS_CURVE = ec.SECP384R1
    __DEFAULT_NETWORK_MIDDLEWARE_CLASS = RestMiddleware

    class ConfigurationError(RuntimeError):
        pass

    class InvalidConfiguration(ConfigurationError):
        pass

    def __init__(
            self,

            # Base
            config_root: str = None,
            config_file_location: str = None,

            # Mode
            dev_mode: bool = False,
            federated_only: bool = False,

            # Identity
            is_me: bool = True,
            checksum_public_address: str = None,
            crypto_power: CryptoPower = None,

            # Keyring
            keyring: NucypherKeyring = None,
            keyring_dir: str = None,

            # Learner
            learn_on_same_thread: bool = False,
            abort_on_learning_error: bool = False,
            start_learning_now: bool = True,

            # REST
            rest_host: str = None,
            rest_port: int = None,

            # TLS
            tls_curve: EllipticCurve = None,
            certificate: Certificate = None,

            # Network
            domains: Set[str] = None,
            interface_signature: Signature = None,
            network_middleware: RestMiddleware = None,

            # Node Storage
            known_nodes: set = None,
            node_storage: NodeStorage = None,
            reload_metadata: bool = True,
            save_metadata: bool = True,

            # Blockchain
            poa: bool = False,
            provider_uri: str = None,

            # Registry
            registry_source: str = None,
            registry_filepath: str = None,
            import_seed_registry: bool = False  # TODO: needs cleanup
    ) -> None:

        # Logs
        self.log = Logger(self.__class__.__name__)

        #
        # REST + TLS (Ursula)
        #
        self.rest_host = rest_host or self.DEFAULT_REST_HOST
        default_port = (self.DEFAULT_DEVELOPMENT_REST_PORT
                        if dev_mode else self.DEFAULT_REST_PORT)
        self.rest_port = rest_port or default_port
        self.tls_curve = tls_curve or self.__DEFAULT_TLS_CURVE
        self.certificate = certificate

        self.interface_signature = interface_signature
        self.crypto_power = crypto_power

        #
        # Keyring
        #
        self.keyring = keyring or NO_KEYRING_ATTACHED
        self.keyring_dir = keyring_dir or UNINITIALIZED_CONFIGURATION

        # Contract Registry
        if import_seed_registry is True:
            registry_source = self.REGISTRY_SOURCE
            if not os.path.isfile(registry_source):
                message = "Seed contract registry does not exist at path {}.".format(
                    registry_filepath)
                self.log.debug(message)
                raise RuntimeError(message)
        self.__registry_source = registry_source or self.REGISTRY_SOURCE
        self.registry_filepath = registry_filepath or UNINITIALIZED_CONFIGURATION

        #
        # Configuration
        #
        self.config_file_location = config_file_location or UNINITIALIZED_CONFIGURATION
        self.config_root = UNINITIALIZED_CONFIGURATION

        #
        # Mode
        #
        self.federated_only = federated_only
        self.__dev_mode = dev_mode

        if self.__dev_mode:
            self.__temp_dir = UNINITIALIZED_CONFIGURATION
            self.node_storage = ForgetfulNodeStorage(
                federated_only=federated_only, character_class=self.__class__)
        else:
            self.__temp_dir = LIVE_CONFIGURATION
            self.config_root = config_root or DEFAULT_CONFIG_ROOT
            self._cache_runtime_filepaths()
            self.node_storage = node_storage or LocalFileBasedNodeStorage(
                federated_only=federated_only, config_root=self.config_root)

        # Domains
        self.domains = domains or {self.DEFAULT_DOMAIN}

        #
        # Identity
        #
        self.is_me = is_me
        self.checksum_public_address = checksum_public_address

        if self.is_me is True or dev_mode is True:
            # Self
            if self.checksum_public_address and dev_mode is False:
                self.attach_keyring()
            self.network_middleware = network_middleware or self.__DEFAULT_NETWORK_MIDDLEWARE_CLASS()

        else:
            # Stranger
            self.node_storage = STRANGER_CONFIGURATION
            self.keyring_dir = STRANGER_CONFIGURATION
            self.keyring = STRANGER_CONFIGURATION
            self.network_middleware = STRANGER_CONFIGURATION
            if network_middleware:
                raise self.ConfigurationError(
                    "Cannot configure a stranger to use network middleware.")

        #
        # Learner
        #
        self.learn_on_same_thread = learn_on_same_thread
        self.abort_on_learning_error = abort_on_learning_error
        self.start_learning_now = start_learning_now
        self.save_metadata = save_metadata
        self.reload_metadata = reload_metadata

        self.__fleet_state = FleetStateTracker()
        known_nodes = known_nodes or set()
        if known_nodes:
            self.known_nodes._nodes.update(
                {node.checksum_public_address: node
                 for node in known_nodes})
            self.known_nodes.record_fleet_state()  # TODO: Does this call need to be here?

        #
        # Blockchain
        #
        self.poa = poa
        self.provider_uri = provider_uri or self.DEFAULT_PROVIDER_URI

        self.blockchain = NO_BLOCKCHAIN_CONNECTION
        self.accounts = NO_BLOCKCHAIN_CONNECTION
        self.token_agent = NO_BLOCKCHAIN_CONNECTION
        self.miner_agent = NO_BLOCKCHAIN_CONNECTION
        self.policy_agent = NO_BLOCKCHAIN_CONNECTION

        #
        # Development Mode
        #
        if dev_mode:

            # Ephemeral dev settings
            self.abort_on_learning_error = True
            self.save_metadata = False
            self.reload_metadata = False

            # Generate one-time alphanumeric development password
            alphabet = string.ascii_letters + string.digits
            password = ''.join(secrets.choice(alphabet) for _ in range(32))

            # Auto-initialize
            self.initialize(password=password,
                            import_registry=import_seed_registry)

    def __call__(self, *args, **kwargs):
        return self.produce(*args, **kwargs)

    @classmethod
    def generate(cls, password: str, no_registry: bool, *args, **kwargs):
        """Shortcut: Hook-up a new initial installation and write configuration file to the disk"""
        node_config = cls(dev_mode=False, is_me=True, *args, **kwargs)
        node_config.__write(password=password, no_registry=no_registry)
        return node_config

    def __write(self, password: str, no_registry: bool):

        if not self.federated_only:
            self.connect_to_blockchain()

        _new_installation_path = self.initialize(password=password,
                                                 import_registry=no_registry)
        _configuration_filepath = self.to_configuration_file(
            filepath=self.config_file_location)

    def cleanup(self) -> None:
        if self.__dev_mode:
            self.__temp_dir.cleanup()

    @property
    def dev_mode(self):
        return self.__dev_mode

    @property
    def known_nodes(self):
        return self.__fleet_state

    def connect_to_blockchain(self, recompile_contracts: bool = False):
        if self.federated_only:
            raise NodeConfiguration.ConfigurationError(
                "Cannot connect to blockchain in federated mode")

        self.blockchain = Blockchain.connect(provider_uri=self.provider_uri,
                                             compile=recompile_contracts,
                                             poa=self.poa)

        self.accounts = self.blockchain.interface.w3.eth.accounts
        self.log.debug("Established connection to provider {}".format(
            self.blockchain.interface.provider_uri))

    def connect_to_contracts(self) -> None:
        """Initialize contract agency and set them on config"""
        self.token_agent = NucypherTokenAgent(blockchain=self.blockchain)
        self.miner_agent = MinerAgent(blockchain=self.blockchain)
        self.policy_agent = PolicyAgent(blockchain=self.blockchain)
        self.log.debug("Established connection to nucypher contracts")

    def read_known_nodes(self):
        known_nodes = self.node_storage.all(federated_only=self.federated_only)
        known_nodes = {
            node.checksum_public_address: node
            for node in known_nodes
        }
        self.known_nodes._nodes.update(known_nodes)
        self.known_nodes.record_fleet_state()
        return self.known_nodes

    def forget_nodes(self) -> None:
        self.node_storage.clear()
        message = "Removed all stored node node metadata and certificates"
        self.log.debug(message)

    def destroy(self, force: bool = False, logs: bool = True) -> None:

        # TODO: Further confirm this is a nucypher dir first! (in-depth measure)

        if logs is True or force:
            shutil.rmtree(USER_LOG_DIR, ignore_errors=True)
        try:
            shutil.rmtree(self.config_root, ignore_errors=force)
        except FileNotFoundError:
            raise FileNotFoundError("No such directory {}".format(
                self.config_root))

    def generate_parameters(self, **overrides) -> dict:
        merged_parameters = {
            **self.static_payload,
            **self.dynamic_payload,
            **overrides
        }
        non_init_params = ('config_root', 'poa', 'provider_uri')
        character_init_params = filter(lambda t: t[0] not in non_init_params,
                                       merged_parameters.items())
        return dict(character_init_params)

    def produce(self, **overrides):
        """Initialize a new character instance and return it."""
        merged_parameters = self.generate_parameters(**overrides)
        character = self._CHARACTER_CLASS(**merged_parameters)
        return character

    @staticmethod
    def _read_configuration_file(filepath: str) -> dict:
        try:
            with open(filepath, 'r') as file:
                raw_contents = file.read()
                payload = NodeConfiguration.__CONFIG_FILE_DESERIALIZER(
                    raw_contents)
        except FileNotFoundError:
            raise  # TODO: Do we need better exception handling here?
        return payload

    @classmethod
    def from_configuration_file(cls,
                                filepath: str = None,
                                **overrides) -> 'NodeConfiguration':
        """Initialize a NodeConfiguration from a JSON file."""

        from nucypher.config.storages import NodeStorage
        node_storage_subclasses = {
            storage._name: storage
            for storage in NodeStorage.__subclasses__()
        }

        if filepath is None:
            filepath = cls.DEFAULT_CONFIG_FILE_LOCATION

        # Read from disk
        payload = cls._read_configuration_file(filepath=filepath)

        # Sanity check
        try:
            checksum_address = payload['checksum_public_address']
        except KeyError:
            raise cls.ConfigurationError(
                f"No checksum address specified in configuration file {filepath}"
            )
        else:
            if not eth_utils.is_checksum_address(checksum_address):
                raise cls.ConfigurationError(
                    f"Address: '{checksum_address}', specified in {filepath} is not a valid checksum address."
                )

        # Initialize NodeStorage subclass from file (sub-configuration)
        storage_payload = payload['node_storage']
        storage_type = storage_payload[NodeStorage._TYPE_LABEL]
        storage_class = node_storage_subclasses[storage_type]
        node_storage = storage_class.from_payload(
            payload=storage_payload,
            federated_only=payload['federated_only'],
            serializer=cls.NODE_SERIALIZER,
            deserializer=cls.NODE_DESERIALIZER)

        # Deserialize domains to UTF-8 bytestrings
        domains = list(domain.encode() for domain in payload['domains'])
        payload.update(dict(node_storage=node_storage, domains=domains))

        # Filter out Nones from overrides to detect, well, overrides
        overrides = {k: v for k, v in overrides.items() if v is not None}

        # Instantiate from merged params
        node_configuration = cls(**{**payload, **overrides})

        return node_configuration

    def to_configuration_file(self, filepath: str = None) -> str:
        """Write the static_payload to a JSON file."""
        if filepath is None:
            filename = '{}{}'.format(self._NAME.lower(),
                                     self.__CONFIG_FILE_EXT)
            filepath = os.path.join(self.config_root, filename)

        payload = self.static_payload
        del payload['is_me']

        # Serialize domains
        domains = list(str(domain) for domain in self.domains)

        # Save node connection data
        payload.update(
            dict(node_storage=self.node_storage.payload(), domains=domains))

        with open(filepath, 'w') as config_file:
            config_file.write(json.dumps(payload, indent=4))
        return filepath

    def validate(self, config_root: str, no_registry=False) -> bool:
        # Top-level
        if not os.path.exists(config_root):
            raise self.ConfigurationError(
                'No configuration directory found at {}.'.format(config_root))

        # Sub-paths
        filepaths = self.runtime_filepaths
        if no_registry:
            del filepaths['registry_filepath']

        for field, path in filepaths.items():
            if not os.path.exists(path):
                message = 'Missing configuration file or directory: {}.'
                if 'registry' in path:
                    message += ' Did you mean to pass --federated-only?'
                raise NodeConfiguration.InvalidConfiguration(
                    message.format(path))
        return True

    @property
    def static_payload(self) -> dict:
        """Exported static configuration values for initializing Ursula"""
        payload = dict(
            config_root=self.config_root,

            # Identity
            is_me=self.is_me,
            federated_only=self.federated_only,
            checksum_public_address=self.checksum_public_address,
            keyring_dir=self.keyring_dir,

            # Behavior
            domains=self.domains,  # From Set
            provider_uri=self.provider_uri,
            learn_on_same_thread=self.learn_on_same_thread,
            abort_on_learning_error=self.abort_on_learning_error,
            start_learning_now=self.start_learning_now,
            save_metadata=self.save_metadata,
        )

        if not self.federated_only:
            payload.update(dict(provider_uri=self.provider_uri, poa=self.poa))

        return payload

    @property
    def dynamic_payload(self, **overrides) -> dict:
        """Exported dynamic configuration values for initializing Ursula"""
        # NOTE: this is accessed as a property, so callers can never actually
        # pass overrides; the overrides branch below is effectively dead code.

        if self.reload_metadata:
            known_nodes = self.node_storage.all(
                federated_only=self.federated_only)
            known_nodes = {
                node.checksum_public_address: node
                for node in known_nodes
            }
            self.known_nodes._nodes.update(known_nodes)
        self.known_nodes.record_fleet_state()

        payload = dict(network_middleware=self.network_middleware
                       or self.__DEFAULT_NETWORK_MIDDLEWARE_CLASS(),
                       known_nodes=self.known_nodes,
                       node_storage=self.node_storage,
                       crypto_power_ups=self.derive_node_power_ups() or None)

        if not self.federated_only:
            self.connect_to_blockchain(recompile_contracts=False)
            payload.update(blockchain=self.blockchain)

        if overrides:
            self.log.debug(
                "Overrides supplied to dynamic payload for {}".format(
                    self.__class__.__name__))
            payload.update(overrides)

        return payload

    @property
    def runtime_filepaths(self):
        filepaths = dict(config_root=self.config_root,
                         keyring_dir=self.keyring_dir,
                         registry_filepath=self.registry_filepath)
        return filepaths

    @classmethod
    def generate_runtime_filepaths(cls, config_root: str) -> dict:
        """Dynamically generate paths based on configuration root directory"""
        filepaths = dict(
            config_root=config_root,
            config_file_location=os.path.join(config_root,
                                              cls.CONFIG_FILENAME),
            keyring_dir=os.path.join(config_root, 'keyring'),
            registry_filepath=os.path.join(config_root,
                                           NodeConfiguration.__REGISTRY_NAME))
        return filepaths

    def _cache_runtime_filepaths(self) -> None:
        """Generate runtime filepaths and cache them on the config object"""
        filepaths = self.generate_runtime_filepaths(
            config_root=self.config_root)
        for field, filepath in filepaths.items():
            if getattr(self, field) is UNINITIALIZED_CONFIGURATION:
                setattr(self, field, filepath)

    def derive_node_power_ups(self) -> List[CryptoPowerUp]:
        power_ups = list()
        if self.is_me and not self.dev_mode:
            for power_class in self._CHARACTER_CLASS._default_crypto_powerups:
                power_up = self.keyring.derive_crypto_power(power_class)
                power_ups.append(power_up)
        return power_ups

    def initialize(
        self,
        password: str,
        import_registry: bool = True,
    ) -> str:
        """Initialize a new configuration."""

        #
        # Create Config Root
        #
        if self.__dev_mode:
            self.__temp_dir = TemporaryDirectory(
                prefix=self.TEMP_CONFIGURATION_DIR_PREFIX)
            self.config_root = self.__temp_dir.name
        else:
            try:
                os.mkdir(self.config_root, mode=0o755)

            except FileExistsError:
                if os.listdir(self.config_root):
                    message = "There are existing files located at {}".format(
                        self.config_root)
                    self.log.debug(message)

            except FileNotFoundError:
                os.makedirs(self.config_root, mode=0o755)

        #
        # Create Config Subdirectories
        #
        self._cache_runtime_filepaths()
        try:

            # Node Storage
            self.node_storage.initialize()

            # Keyring
            if not self.dev_mode:
                if not os.path.isdir(self.keyring_dir):
                    os.mkdir(
                        self.keyring_dir, mode=0o700
                    )  # keyring TODO: Keyring backend entry point: COS
                self.write_keyring(password=password)

            # Registry
            if import_registry and not self.federated_only:
                self.write_registry(
                    output_filepath=self.registry_filepath,  # type: str
                    source=self.__registry_source,  # type: str
                    blank=import_registry)  # type: bool

        except FileExistsError:
            existing_paths = [
                os.path.join(self.config_root, f)
                for f in os.listdir(self.config_root)
            ]
            message = "There are pre-existing files at {}: {}".format(
                self.config_root, existing_paths)
            self.log.info(message)

        if not self.__dev_mode:
            self.validate(config_root=self.config_root,
                          no_registry=import_registry or self.federated_only)

        # Success
        message = "Created nucypher installation files at {}".format(
            self.config_root)
        self.log.debug(message)
        return self.config_root

    def attach_keyring(self,
                       checksum_address: str = None,
                       *args,
                       **kwargs) -> None:
        if self.keyring is not NO_KEYRING_ATTACHED:
            if self.keyring.checksum_address != (checksum_address or
                                                 self.checksum_public_address):
                raise self.ConfigurationError(
                    "There is already a keyring attached to this configuration."
                )
            return

        if (checksum_address or self.checksum_public_address) is None:
            raise self.ConfigurationError(
                "No account specified to unlock keyring")

        self.keyring = NucypherKeyring(
            keyring_root=self.keyring_dir,  # type: str
            account=checksum_address
            or self.checksum_public_address,  # type: str
            *args,
            **kwargs)

    def write_keyring(self, password: str,
                      **generation_kwargs) -> NucypherKeyring:

        if not self.federated_only and not self.checksum_public_address:
            checksum_address = self.blockchain.interface.w3.eth.accounts[
                0]  # etherbase
        else:
            checksum_address = self.checksum_public_address

        self.keyring = NucypherKeyring.generate(
            password=password,
            keyring_root=self.keyring_dir,
            checksum_address=checksum_address,
            **generation_kwargs)
        # Operating mode switch TODO: #466
        if self.federated_only:
            self.checksum_public_address = self.keyring.federated_address
        else:
            self.checksum_public_address = self.keyring.account

        return self.keyring

    def write_registry(self,
                       output_filepath: str = None,
                       source: str = None,
                       force: bool = False,
                       blank=False) -> str:

        output_filepath = output_filepath or self.registry_filepath
        source = source or self.REGISTRY_SOURCE

        # `force` means "overwrite an existing registry file".
        if not force and os.path.isfile(output_filepath):
            raise self.ConfigurationError(
                'There is an existing file at the registry output_filepath {}'.
                format(output_filepath))

        if not blank and not self.dev_mode:
            # Validate Registry
            with open(source, 'r') as registry_file:
                try:
                    json.loads(registry_file.read())
                except JSONDecodeError:
                    message = "The registry source {} is not valid JSON".format(
                        source)
                    self.log.critical(message)
                    raise self.ConfigurationError(message)
                else:
                    self.log.debug(
                        "Source registry {} is valid JSON".format(source))

        else:
            self.log.warn("Writing blank registry")
            open(output_filepath, 'w').close()  # write blank

        self.log.debug(
            "Successfully wrote registry to {}".format(output_filepath))
        return output_filepath
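
For orientation, the directory layout generate_runtime_filepaths produces under a configuration root looks like the sketch below (the root path and the 'ursula.config' filename are placeholders, since CONFIG_FILENAME is abstract on this class):

import os

config_root = '/tmp/nucypher-demo'
runtime_filepaths = {
    'config_root': config_root,
    'config_file_location': os.path.join(config_root, 'ursula.config'),
    'keyring_dir': os.path.join(config_root, 'keyring'),
    'registry_filepath': os.path.join(config_root, 'contract_registry.json'),
}
print(runtime_filepaths)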
Exemple #42
0
class IRCd(Service):
	def __init__(self, configFileName):
		self.config = Config(self, configFileName)
		
		self.boundPorts = {}
		self.loadedModules = {}
		self._loadedModuleData = {}
		self._unloadingModules = {}
		self.commonModules = set()
		self.userCommands = {}
		self.serverCommands = {}
		self.channelModes = ({}, {}, {}, {})
		self.channelStatuses = {}
		self.channelStatusSymbols = {}
		self.channelStatusOrder = []
		self.channelModeTypes = {}
		self.userModes = ({}, {}, {}, {})
		self.userModeTypes = {}
		self.actions = {}
		self.storage = None
		self.storageSyncer = None
		self.dataCache = {}
		self.functionCache = {}
		
		self.serverID = None
		self.name = None
		self.isupport_tokens = {
			"CASEMAPPING": "strict-rfc1459",
			"CHANTYPES": "#",
		}
		self._uid = self._genUID()
		
		self.users = {}
		self.userNicks = CaseInsensitiveDictionary()
		self.channels = CaseInsensitiveDictionary(WeakValueDictionary)
		self.servers = {}
		self.serverNames = CaseInsensitiveDictionary()
		self.recentlyQuitUsers = {}
		self.recentlyQuitServers = {}
		self.recentlyDestroyedChannels = CaseInsensitiveDictionary()
		self.pruneRecentlyQuit = None
		self.pruneRecentChannels = None
		
		self._logFilter = LogLevelFilterPredicate()
		filterObserver = FilteringLogObserver(globalLogPublisher, (self._logFilter,))
		self.log = Logger("txircd", observer=filterObserver)
		
		self.startupTime = None
	
	def startService(self):
		self.log.info("Starting up...")
		self.startupTime = now()
		self.log.info("Loading configuration...")
		self.config.reload()
		self.name = self.config["server_name"]
		self.serverID = self.config["server_id"]
		self.log.info("Loading storage...")
		self.storage = shelve.open(self.config["datastore_path"], writeback=True)
		self.storageSyncer = LoopingCall(self.storage.sync)
		self.storageSyncer.start(self.config.get("storage_sync_interval", 5), now=False)
		self.log.info("Starting processes...")
		self.pruneRecentlyQuit = LoopingCall(self.pruneQuit)
		self.pruneRecentlyQuit.start(10, now=False)
		self.pruneRecentChannels = LoopingCall(self.pruneChannels)
		self.pruneRecentChannels.start(15, now=False)
		self.log.info("Loading modules...")
		self._loadModules()
		self.log.info("Binding ports...")
		self._bindPorts()
		self.log.info("txircd started!")
		try:
			self._logFilter.setLogLevelForNamespace("txircd", LogLevel.levelWithName(self.config["log_level"]))
		except (KeyError, InvalidLogLevelError):
			self._logFilter.setLogLevelForNamespace("txircd", LogLevel.warn)
		self.runActionStandard("startup")
	
	def stopService(self):
		stopDeferreds = []
		self.log.info("Disconnecting servers...")
		serverList = self.servers.values() # Take the list of server objects
		self.servers = {} # And then destroy the server dict to inhibit server objects generating lots of noise
		for server in serverList:
			if server.nextClosest == self.serverID:
				stopDeferreds.append(server.disconnectedDeferred)
				allUsers = self.users.keys()
				for user in allUsers:
					if user[:3] == server.serverID:
						del self.users[user]
				server.transport.loseConnection()
		self.log.info("Disconnecting users...")
		userList = self.users.values() # Basically do the same thing I just did with the servers
		self.users = {}
		for user in userList:
			if user.transport:
				stopDeferreds.append(user.disconnectedDeferred)
				user.transport.loseConnection()
		self.log.info("Unloading modules...")
		moduleList = self.loadedModules.keys()
		for module in moduleList:
			self._unloadModule(module, False) # Incomplete unload is done to save time and because side effects are destroyed anyway
		self.log.info("Stopping processes...")
		if self.pruneRecentlyQuit.running:
			self.pruneRecentlyQuit.stop()
		if self.pruneRecentChannels.running:
			self.pruneRecentChannels.stop()
		self.log.info("Closing data storage...")
		if self.storageSyncer.running:
			self.storageSyncer.stop()
		self.storage.close() # a close() will sync() also
		self.log.info("Releasing ports...")
		stopDeferreds.extend(self._unbindPorts())
		return DeferredList(stopDeferreds)
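	# Returning a DeferredList from stopService lets the reactor wait until
	# every server link and client connection has finished disconnecting
	# before shutdown completes.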
	
	def _loadModules(self):
		for module in getPlugins(IModuleData, txircd.modules):
			if module.name in self.loadedModules:
				continue
			if module.core or module.name in self.config["modules"]:
				self._loadModuleData(module)
		for moduleName in self.config["modules"]:
			if moduleName not in self.loadedModules:
				self.log.warn("The module {module} failed to load.", module=moduleName)
	
	def loadModule(self, moduleName):
		"""
		Loads a module of the specified name.
		Raises ModuleLoadError if the module cannot be loaded.
		If the specified module is currently being unloaded, returns the
		DeferredList specified by the module when it was unloading with a
		callback to try to load the module again once it succeeds.
		"""
		if moduleName in self._unloadingModules:
			deferList = self._unloadingModules[moduleName]
			deferList.addCallback(self._tryLoadAgain, moduleName)
			return deferList
		for module in getPlugins(IModuleData, txircd.modules):
			if module.name == moduleName:
				rebuild(importlib.import_module(module.__module__)) # getPlugins doesn't recompile modules, so let's do that ourselves.
				self._loadModuleData(module)
				self.log.info("Loaded module {module}.", module=moduleName)
				break
	
	def _tryLoadAgain(self, _, moduleName):
		self.loadModule(moduleName)
	
	def _loadModuleData(self, module):
		if not IModuleData.providedBy(module):
			raise ModuleLoadError ("???", "Module does not implement module interface")
		if not module.name:
			raise ModuleLoadError ("???", "Module did not provide a name")
		if module.name in self.loadedModules:
			self.log.debug("Not loading {module.name} because it's already loaded", module=module)
			return
		
		self.log.debug("Beginning to load {module.name}...", module=module)
		module.hookIRCd(self)
		try:
			module.verifyConfig(self.config)
		except ConfigError as e:
			raise ModuleLoadError(module.name, e)
		
		self.log.debug("Loading hooks from {module.name}...", module=module)
		moduleData = {
			"channelmodes": module.channelModes(),
			"usermodes": module.userModes(),
			"actions": module.actions(),
			"usercommands": module.userCommands(),
			"servercommands": module.serverCommands()
		}
		newChannelModes = ({}, {}, {}, {})
		newChannelStatuses = {}
		newUserModes = ({}, {}, {}, {})
		newActions = {}
		newUserCommands = {}
		newServerCommands = {}
		common = False
		self.log.debug("Processing hook data from {module.name}...", module=module)
		for mode in moduleData["channelmodes"]:
			if mode[0] in self.channelModeTypes:
				raise ModuleLoadError (module.name, "Tries to implement channel mode +{} when that mode is already implemented.".format(mode[0]))
			if not IMode.providedBy(mode[2]):
				raise ModuleLoadError (module.name, "Returns a channel mode object (+{}) that doesn't implement IMode.".format(mode[0]))
			if mode[1] == ModeType.Status:
				if mode[4] in self.channelStatusSymbols:
					raise ModuleLoadError (module.name, "Tries to create a channel rank with symbol {} when that symbol is already in use.".format(mode[4]))
				try:
					newChannelStatuses[mode[0]] = (mode[4], mode[3], mode[2])
				except IndexError:
					raise ModuleLoadError (module.name, "Specifies channel status mode {} without a rank or symbol".format(mode[0]))
			else:
				newChannelModes[mode[1]][mode[0]] = mode[2]
			common = True
		for mode in moduleData["usermodes"]:
			if mode[0] in self.userModeTypes:
				raise ModuleLoadError (module.name, "Tries to implement user mode +{} when that mode is already implemented.".format(mode[0]))
			if not IMode.providedBy(mode[2]):
				raise ModuleLoadError (module.name, "Returns a user mode object (+{}) that doesn't implement IMode.".format(mode[0]))
			newUserModes[mode[1]][mode[0]] = mode[2]
			common = True
		for action in moduleData["actions"]:
			if action[0] not in newActions:
				newActions[action[0]] = [(action[2], action[1])]
			else:
				newActions[action[0]].append((action[2], action[1]))
		for command in moduleData["usercommands"]:
			if not ICommand.providedBy(command[2]):
				raise ModuleLoadError (module.name, "Returns a user command object ({}) that doesn't implement ICommand.".format(command[0]))
			if command[0] not in newUserCommands:
				newUserCommands[command[0]] = []
			newUserCommands[command[0]].append((command[2], command[1]))
		for command in moduleData["servercommands"]:
			if not ICommand.providedBy(command[2]):
				raise ModuleLoadError (module.name, "Returns a server command object ({}) that doesnt implement ICommand.".format(command[0]))
			if command[0] not in newServerCommands:
				newServerCommands[command[0]] = []
			newServerCommands[command[0]].append((command[2], command[1]))
			common = True
		if not common:
			common = module.requiredOnAllServers

		self.log.debug("Loaded data from {module.name}; committing data and calling hooks...", module=module)
		
		module.load()
		
		self.loadedModules[module.name] = module
		self._loadedModuleData[module.name] = moduleData
		if common:
			self.commonModules.add(module.name)
		
		self.runActionStandard("moduleload", module.name)
		
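		# Commit phase: register the module's modes, statuses, actions, and
		# commands. Each priority list stays sorted by priority (descending);
		# a new entry is inserted before the first existing entry with a
		# lower priority, or appended at the end.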
		for modeType, typeSet in enumerate(newChannelModes):
			for mode, implementation in typeSet.iteritems():
				self.channelModeTypes[mode] = modeType
				self.channelModes[modeType][mode] = implementation
		for mode, data in newChannelStatuses.iteritems():
			self.channelModeTypes[mode] = ModeType.Status
			self.channelStatuses[mode] = data
			self.channelStatusSymbols[data[0]] = mode
			for index, status in enumerate(self.channelStatusOrder):
				if self.channelStatuses[status][1] < data[1]:
					self.channelStatusOrder.insert(index, mode)
					break
			else:
				self.channelStatusOrder.append(mode)
		for modeType, typeSet in enumerate(newUserModes):
			for mode, implementation in typeSet.iteritems():
				self.userModeTypes[mode] = modeType
				self.userModes[modeType][mode] = implementation
		for action, actionList in newActions.iteritems():
			if action not in self.actions:
				self.actions[action] = []
			for actionData in actionList:
				for index, handlerData in enumerate(self.actions[action]):
					if handlerData[1] < actionData[1]:
						self.actions[action].insert(index, actionData)
						break
				else:
					self.actions[action].append(actionData)
		for command, dataList in newUserCommands.iteritems():
			if command not in self.userCommands:
				self.userCommands[command] = []
			for data in dataList:
				for index, cmd in enumerate(self.userCommands[command]):
					if cmd[1] < data[1]:
						self.userCommands[command].insert(index, data)
						break
				else:
					self.userCommands[command].append(data)
		for command, dataList in newServerCommands.iteritems():
			if command not in self.serverCommands:
				self.serverCommands[command] = []
			for data in dataList:
				for index, cmd in enumerate(self.serverCommands[command]):
					if cmd[1] < data[1]:
						self.serverCommands[command].insert(index, data)
						break
				else:
					self.serverCommands[command].append(data)
		
		self.log.debug("Module {module.name} is now fully loaded.", module=module)
	
	def unloadModule(self, moduleName):
		"""
		Unloads the loaded module with the given name. Raises ValueError
		if the module cannot be unloaded because it's a core module.
		"""
		self._unloadModule(moduleName, True)
		self.log.info("Unloaded module {module}.", module=moduleName)
	
	def _unloadModule(self, moduleName, fullUnload):
		unloadDeferreds = []
		if moduleName not in self.loadedModules:
			return
		module = self.loadedModules[moduleName]
		if fullUnload and module.core:
			raise ValueError ("The module you're trying to unload is a core module.")
		moduleData = self._loadedModuleData[moduleName]
		d = module.unload()
		if d is not None:
			unloadDeferreds.append(d)
		
		if fullUnload:
			d = module.fullUnload()
			if d is not None:
				unloadDeferreds.append(d)
		
		for modeData in moduleData["channelmodes"]:
			if fullUnload: # Unset modes on full unload
				if modeData[1] == ModeType.Status:
					for channel in self.channels.itervalues():
						removeFromChannel = []
						for user, userData in channel.user.iteritems():
							if modeData[0] in userData["status"]:
								removeFromChannel.append((False, modeData[0], user.uuid))
						channel.setModes(removeFromChannel, self.serverID)
				elif modeData[1] == ModeType.List:
					for channel in self.channels.itervalues():
						if modeData[0] in channel.modes:
							removeFromChannel = []
							for paramData in channel.modes[modeData[0]]:
								removeFromChannel.append((False, modeData[0], paramData[0]))
							channel.setModes(removeFromChannel, self.serverID)
				else:
					for channel in self.channels.itervalues():
						if modeData[0] in channel.modes:
							channel.setModes([(False, modeData[0], channel.modes[modeData[0]])], self.serverID)
			
			if modeData[1] == ModeType.Status:
				del self.channelStatuses[modeData[0]]
				del self.channelStatusSymbols[modeData[4]]
				self.channelStatusOrder.remove(modeData[0])
			else:
				del self.channelModes[modeData[1]][modeData[0]]
			del self.channelModeTypes[modeData[0]]
		for modeData in moduleData["usermodes"]:
			if fullUnload: # Unset modes on full unload
				if modeData[1] == ModeType.List:
					for user in self.users.itervalues():
						if modeData[0] in user.modes:
							removeFromUser = []
							for paramData in user.modes[modeData[0]]:
								removeFromUser.append((False, modeData[0], paramData[0]))
							user.setModes(removeFromUser, self.serverID)
				else:
					for user in self.users.itervalues():
						if modeData[0] in user.modes:
							user.setModes([(False, modeData[0], user.modes[modeData[0]])], self.serverID)
			
			del self.userModes[modeData[1]][modeData[0]]
			del self.userModeTypes[modeData[0]]
		for actionData in moduleData["actions"]:
			self.actions[actionData[0]].remove((actionData[2], actionData[1]))
			if not self.actions[actionData[0]]:
				del self.actions[actionData[0]]
		for commandData in moduleData["usercommands"]:
			self.userCommands[commandData[0]].remove((commandData[2], commandData[1]))
			if not self.userCommands[commandData[0]]:
				del self.userCommands[commandData[0]]
		for commandData in moduleData["servercommands"]:
			self.serverCommands[commandData[0]].remove((commandData[2], commandData[1]))
			if not self.serverCommands[commandData[0]]:
				del self.serverCommands[commandData[0]]
		
		del self.loadedModules[moduleName]
		del self._loadedModuleData[moduleName]
		
		if fullUnload:
			self.runActionStandard("moduleunload", module.name)
		
		if unloadDeferreds:
			deferList = DeferredList(unloadDeferreds)
			self._unloadingModules[moduleName] = deferList
			deferList.addCallback(self._removeFromUnloadingList, moduleName)
			return deferList
	
	def _removeFromUnloadingList(self, _, moduleName):
		del self._unloadingModules[moduleName]
	
	def reloadModule(self, moduleName):
		"""
		Reloads the module with the given name.
		Returns a DeferredList if the module unloads with one or more Deferreds.
		May raise ModuleLoadError if the module cannot be loaded.
		"""
		deferList = self._unloadModule(moduleName, False)
		if deferList is None:
			deferList = self.loadModule(moduleName)
		else:
			deferList.addCallback(lambda result: self.loadModule(moduleName))
		return deferList

	def verifyConfig(self, config):
		# IRCd
		if "server_name" not in config:
			raise ConfigValidationError("server_name", "required item not found in configuration file.")
		if not isinstance(config["server_name"], basestring):
			raise ConfigValidationError("server_name", "value must be a string")
		if len(config["server_name"]) > 64:
			config["server_name"] = config["server_name"][:64]
			self.logConfigValidationWarning("server_name", "value is too long and has been truncated", config["server_name"])
		if not re.match(r"^[a-zA-Z0-9.-]+\.[a-zA-Z0-9.-]+$", config["server_name"]):
			raise ConfigValidationError("server_name", "server name must look like a valid hostname.")
		if "server_id" in config:
			if not isinstance(config["server_id"], basestring):
				raise ConfigValidationError("server_id", "value must be a string")
			else:
				config["server_id"] = config["server_id"].upper()
		else:
			randFromName = random.Random(config["server_name"])
			serverID = randFromName.choice(string.digits) + randFromName.choice(string.digits + string.ascii_uppercase) + randFromName.choice(string.digits + string.ascii_uppercase)
			config["server_id"] = serverID
		if len(config["server_id"]) != 3 or not config["server_id"].isalnum() or not config["server_id"][0].isdigit():
			raise ConfigValidationError("server_id", "value must be a 3-character alphanumeric string starting with a number.")
		if "server_description" not in config:
			raise ConfigValidationError("server_description", "required item not found in configuration file.")
		if not isinstance(config["server_description"], basestring):
			raise ConfigValidationError("server_description", "value must be a string")
		if not config["server_description"]:
			raise ConfigValidationError("server_description", "value must not be an empty string")
		if len(config["server_description"]) > 255:
			config["server_description"] = config["server_description"][:255]
			self.logConfigValidationWarning("server_description", "value is too long and has been truncated", config["server_description"])
		if "network_name" not in config:
			raise ConfigValidationError("network_name", "required item not found in configuration file.")
		if not isinstance(config["network_name"], basestring):
			raise ConfigValidationError("network_name", "value must be a string")
		if not config["network_name"]:
			raise ConfigValidationError("network_name", "value must not be an empty string")
		if " " in config["network_name"]:
			raise ConfigValidationError("network_name", "value cannot have spaces")
		if len(config["network_name"]) > 32:
			config["network_name"] = config["network_name"][:32]
			self.logConfigValidationWarning("network_name", "value is too long", config["network_name"])
		if "bind_client" not in config:
			config["bind_client"] = [ "tcp:6667:interface={::}" ]
			self.logConfigValidationWarning("bind_client", "no default client binding specified", "[ \"tcp:6667:interface={::}\" ]")
		if not isinstance(config["bind_client"], list):
			raise ConfigValidationError("bind_client", "value must be a list")
		for bindDesc in config["bind_client"]:
			if not isinstance(bindDesc, basestring):
				raise ConfigValidationError("bind_client", "every entry must be a string")
		if "bind_server" not in config:
			config["bind_server"] = []
		if not isinstance(config["bind_server"], list):
			raise ConfigValidationError("bind_server", "value must be a list")
		for bindDesc in config["bind_server"]:
			if not isinstance(bindDesc, basestring):
				raise ConfigValidationError("bind_server", "every entry must be a string")
		if "modules" not in config:
			config["modules"] = []
		if not isinstance(config["modules"], list):
			raise ConfigValidationError("modules", "value must be a list")
		for module in config["modules"]:
			if not isinstance(module, basestring):
				raise ConfigValidationError("modules", "every entry must be a string")
		if "links" in config:
			if not isinstance(config["links"], dict):
				raise ConfigValidationError("links", "value must be a dictionary")
			for desc, server in config["links"].iteritems():
				if not isinstance(desc, basestring):
					raise ConfigValidationError("links", "\"{}\" is an invalid server description".format(desc))
				if not isinstance(server, dict):
					raise ConfigValidationError("links", "values for \"{}\" must be a dictionary".format(desc))
				if "connect_descriptor" not in server:
					raise ConfigValidationError("links", "server \"{}\" must contain a \"connect_descriptor\" value".format(desc))
				if "in_password" in server:
					if not isinstance(server["in_password"], basestring):
						config["links"][desc]["in_password"] = str(server["in_password"])
				if "out_password" in server:
					if not isinstance(server["out_password"], basestring):
						config["links"][desc]["out_password"] = str(server["out_password"])
		if "datastore_path" not in config:
			config["datastore_path"] = "data.db"
		if "storage_sync_interval" in config and not isinstance(config["storage_sync_interval"], int):
			raise ConfigValidationError("storage_sync_interval", "invalid number")
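		# Taken together, a minimal configuration accepted by the checks above
		# could be as small as (values illustrative):
		#     server_name: irc.example.com
		#     server_description: An example IRC server
		#     network_name: ExampleNet
		# server_id is then derived deterministically from server_name, client
		# binds default to tcp:6667, and bind_server/modules default to empty lists.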

		# Channels
		if "channel_name_length" in config:
			if not isinstance(config["channel_name_length"], int) or config["channel_name_length"] < 0:
				raise ConfigValidationError("channel_name_length", "invalid number")
			elif config["channel_name_length"] > 64:
				config["channel_name_length"] = 64
				self.logConfigValidationWarning("channel_name_length", "value is too large", 64)
		if "modes_per_line" in config:
			if not isinstance(config["modes_per_line"], int) or config["modes_per_line"] < 0:
				raise ConfigValidationError("modes_per_line", "invalid number")
			elif config["modes_per_line"] > 20:
				config["modes_per_line"] = 20
				self.logConfigValidationWarning("modes_per_line", "value is too large", 20)
		if "channel_listmode_limit" in config:
			if not isinstance(config["channel_listmode_limit"], int) or config["channel_listmode_limit"] < 0:
				raise ConfigValidationError("channel_listmode_limit", "invalid number")
			if config["channel_listmode_limit"] > 256:
				config["channel_listmode_limit"] = 256
				self.logConfigValidationWarning("channel_listmode_limit", "value is too large", 256)

		# Users
		if "user_registration_timeout" in config:
			if not isinstance(config["user_registration_timeout"], int) or config["user_registration_timeout"] < 0:
				raise ConfigValidationError("user_registration_timeout", "invalid number")
			elif config["user_registration_timeout"] < 10:
				config["user_registration_timeout"] = 10
				self.logConfigValidationWarning("user_registration_timeout", "timeout could be too short for clients to register in time", 10)
		if "user_ping_frequency" in config and (not isinstance(config["user_ping_frequency"], int) or config["user_ping_frequency"] < 0):
			raise ConfigValidationError("user_ping_frequency", "invalid number")
		if "hostname_length" in config:
			if not isinstance(config["hostname_length"], int) or config["hostname_length"] < 0:
				raise ConfigValidationError("hostname_length", "invalid number")
			elif config["hostname_length"] > 64:
				config["hostname_length"] = 64
				self.logConfigValidationWarning("hostname_length", "value is too large", 64)
			elif config["hostname_length"] < 4:
				config["hostname_length"] = 4
				self.logConfigValidationWarning("hostname_length", "value is too small", 4)
		if "ident_length" in config:
			if not isinstance(config["ident_length"], int) or config["ident_length"] < 0:
				raise ConfigValidationError("ident_length", "invalid number")
			elif config["ident_length"] > 12:
				config["ident_length"] = 12
				self.logConfigValidationWarning("ident_length", "value is too large", 12)
			elif config["ident_length"] < 1:
				config["ident_length"] = 1
				self.logConfigValidationWarning("ident_length", "value is too small", 1)
		if "gecos_length" in config:
			if not isinstance(config["gecos_length"], int) or config["gecos_length"] < 0:
				raise ConfigValidationError("gecos_length", "invalid number")
			elif config["gecos_length"] > 128:
				config["gecos_length"] = 128
				self.logConfigValidationWarning("gecos_length", "value is too large", 128)
			elif config["gecos_length"] < 1:
				config["gecos_length"] = 1
				self.logConfigValidationWarning("gecos_length", "value is too small", 1)
		if "user_listmode_limit" in config:
			if not isinstance(config["user_listmode_limit"], int) or config["user_listmode_limit"] < 0:
				raise ConfigValidationError("user_listmode_limit", "invalid number")
			if config["user_listmode_limit"] > 256:
				config["user_listmode_limit"] = 256
				self.logConfigValidationWarning("user_listmode_limit", "value is too large", 256)

		# Servers
		if "server_registration_timeout" in config:
			if not isinstance(config["server_registration_timeout"], int) or config["server_registration_timeout"] < 0:
				raise ConfigValidationError("server_registration_timeout", "invalid number")
			elif config["server_registration_timeout"] < 10:
				config["server_registration_timeout"] = 10
				self.logConfigValidationWarning("server_registration_timeout", "timeout could be too short for servers to register in time", 10)
		if "server_ping_frequency" in config and (not isinstance(config["server_ping_frequency"], int) or config["server_ping_frequency"] < 0):
			raise ConfigValidationError("server_ping_frequency", "invalid number")

		for module in self.loadedModules.itervalues():
			module.verifyConfig(config)

	def logConfigValidationWarning(self, key, message, default):
		self.log.warn("Config value \"{configKey}\" is invalid ({message}); the value has been set to a default of \"{default}\".", configKey=key, message=message, default=default)

	def rehash(self):
		"""
		Reloads the configuration file and applies changes.
		"""
		self.log.info("Rehashing...")
		self.config.reload()
		d = self._unbindPorts() # Unbind the ports that are bound
		if d: # And then bind the new ones
			DeferredList(d).addCallback(lambda result: self._bindPorts())
		else:
			self._bindPorts()
		
		try:
			self._logFilter.setLogLevelForNamespace("txircd", LogLevel.levelWithName(self.config["log_level"]))
		except (KeyError, InvalidLogLevelError):
			pass # If we can't set a new log level, we'll keep the old one
		
		for module in self.loadedModules.itervalues():
			module.rehash()
	
	def _bindPorts(self):
		for bindDesc in self.config["bind_client"]:
			try:
				endpoint = serverFromString(reactor, unescapeEndpointDescription(bindDesc))
			except ValueError as e:
				self.log.error(e)
				continue
			listenDeferred = endpoint.listen(UserFactory(self))
			listenDeferred.addCallback(self._savePort, bindDesc, "client")
			listenDeferred.addErrback(self._logNotBound, bindDesc)
		for bindDesc in self.config["bind_server"]:
			try:
				endpoint = serverFromString(reactor, unescapeEndpointDescription(bindDesc))
			except ValueError as e:
				self.log.error(e)
				continue
			listenDeferred = endpoint.listen(ServerListenFactory(self))
			listenDeferred.addCallback(self._savePort, bindDesc, "server")
			listenDeferred.addErrback(self._logNotBound, bindDesc)
	
	def _unbindPorts(self):
		deferreds = []
		for port in self.boundPorts.itervalues():
			d = port.stopListening()
			if d:
				deferreds.append(d)
		return deferreds
	
	def _savePort(self, port, desc, portType):
		self.boundPorts[desc] = port
		self.log.debug("Bound endpoint '{endpointDescription}' for {portType} connections.", endpointDescription=desc, portType=portType)
	
	def _logNotBound(self, err, desc):
		self.log.error("Could not bind '{endpointDescription}': {errorMsg}", endpointDescription=desc, errorMsg=err)
	
	def createUUID(self):
		"""
		Gets the next UUID for a new client.
		"""
		newUUID = self.serverID + self._uid.next()
		while newUUID in self.users: # It'll take over 1.5 billion connections to loop around, but we still
			newUUID = self.serverID + self._uid.next() # want to be extra safe and avoid collisions
		self.log.debug("Generated new UUID {uuid}", uuid=newUUID)
		return newUUID
	
	def _genUID(self):
		uid = "AAAAAA"
		while True:
			yield uid
			uid = self._incrementUID(uid)
	
	def _incrementUID(self, uid):
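		# Each position counts A..Z then 0..9, carrying leftward on wraparound:
		# "AAAAAA" -> "AAAAAB", "AAAAAZ" -> "AAAAA0", "AAAAA9" -> "AAAABA".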
		if uid == "Z": # The first character must be a letter
			return "A" # So wrap that around
		if uid[-1] == "9":
			return self._incrementUID(uid[:-1]) + "A"
		if uid[-1] == "Z":
			return uid[:-1] + "0"
		return uid[:-1] + chr(ord(uid[-1]) + 1)
	
	def pruneQuit(self):
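		# Forget users and servers that quit more than 10 seconds ago.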
		compareTime = now() - timedelta(seconds=10)
		remove = []
		for uuid, timeQuit in self.recentlyQuitUsers.iteritems():
			if timeQuit < compareTime:
				remove.append(uuid)
		for uuid in remove:
			del self.recentlyQuitUsers[uuid]
		
		remove = []
		for serverID, timeQuit in self.recentlyQuitServers.iteritems():
			if timeQuit < compareTime:
				remove.append(serverID)
		for serverID in remove:
			del self.recentlyQuitServers[serverID]
	
	def pruneChannels(self):
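		# Two-pass removal: a channel that no longer exists is flagged on one
		# pruning pass and forgotten on the next.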
		removeChannels = []
		for channel, remove in self.recentlyDestroyedChannels.iteritems():
			if remove:
				removeChannels.append(channel)
			elif channel not in self.channels:
				self.recentlyDestroyedChannels[channel] = True
		for channel in removeChannels:
			del self.recentlyDestroyedChannels[channel]
	
	def generateISupportList(self):
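		# Builds the RPL_ISUPPORT token list sent to clients on registration,
		# e.g. ["CHANNELLEN=64", "NETWORK=ExampleNet", "PREFIX=(ov)@+", ...]
		# (values illustrative).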
		isupport = self.isupport_tokens.copy()
		statusSymbolOrder = "".join([self.channelStatuses[status][0] for status in self.channelStatusOrder])
		isupport["CHANMODES"] = ",".join(["".join(modes) for modes in self.channelModes])
		isupport["CHANNELLEN"] = self.config.get("channel_name_length", 64)
		isupport["NETWORK"] = self.config["network_name"]
		isupport["PREFIX"] = "({}){}".format("".join(self.channelStatusOrder), statusSymbolOrder)
		isupport["STATUSMSG"] = statusSymbolOrder
		isupport["USERMODES"] = ",".join(["".join(modes) for modes in self.userModes])
		self.runActionStandard("buildisupport", isupport)
		isupportList = []
		for key, val in isupport.iteritems():
			if val is None:
				isupportList.append(key)
			else:
				isupportList.append("{}={}".format(key, val))
		return isupportList
	
	def connectServer(self, name):
		"""
		Connect a server with the given name in the configuration.
		Returns a Deferred for the connection when we can successfully connect
		or None if the server is already connected or if we're unable to find
		information for that server in the configuration.
		"""
		if name in self.serverNames:
			return None
		if name not in self.config.get("links", {}):
			return None
		serverConfig = self.config["links"][name]
		endpoint = clientFromString(reactor, unescapeEndpointDescription(serverConfig["connect_descriptor"]))
		d = endpoint.connect(ServerConnectFactory(self))
		d.addCallback(self._completeServerConnection, name)
		return d
	
	def _completeServerConnection(self, result, name):
		self.log.info("Connected to server {serverName}", serverName=name)
		self.runActionStandard("initiateserverconnection", result)
	
	def broadcastToServers(self, fromServer, command, *params, **kw):
		"""
		Broadcasts a message to all connected servers. The fromServer parameter
		should be the server from which the message came; if this server is the
		originating server, specify None for fromServer.
		"""
		for server in self.servers.itervalues():
			if server.nextClosest == self.serverID and server != fromServer:
				server.sendMessage(command, *params, **kw)
	
	def _getActionModes(self, actionName, *params, **kw):
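		# For every user/channel mode that declares it affects actionName, run
		# the registered "modeactioncheck" callbacks in priority order; the first
		# non-None result decides: False vetoes the mode, any other value becomes
		# the parameter passed to the mode's apply() in the resulting function list.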
		users = []
		channels = []
		if "users" in kw:
			users = kw["users"]
		if "channels" in kw:
			channels = kw["channels"]
		
		functionList = []
		
		if users:
			genericUserActionName = "modeactioncheck-user-{}".format(actionName)
			genericUserActionNameWithChannel = "modeactioncheck-user-withchannel-{}".format(actionName)
			for modeType in self.userModes:
				for mode, modeObj in modeType.iteritems():
					if actionName not in modeObj.affectedActions:
						continue
					priority = modeObj.affectedActions[actionName]
					actionList = []
					# Because Python doesn't properly capture variables in lambdas, we have to force static capture
					# by wrapping lambdas in more lambdas.
					# I wish Python wasn't this gross.
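					# For example, [lambda: i for i in range(3)] produces three
					# lambdas that all return 2; the extra outer lambda below is
					# called immediately, freezing the current loop values.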
					for action in self.actions.get("modeactioncheck-user", []):
						actionList.append(((lambda action, actionName, mode: lambda user, *params: action[0](actionName, mode, user, *params))(action, actionName, mode), action[1]))
					for action in self.actions.get("modeactioncheck-user-withchannel", []):
						for channel in channels:
							actionList.append(((lambda action, actionName, mode, channel: lambda user, *params: action[0](actionName, mode, user, channel, *params))(action, actionName, mode, channel), action[1]))
					for action in self.actions.get(genericUserActionName, []):
						actionList.append(((lambda action, mode: lambda user, *params: action[0](mode, user, *params))(action, mode), action[1]))
					for action in self.actions.get(genericUserActionNameWithChannel, []):
						for channel in channels:
							actionList.append(((lambda action, mode, channel: lambda user, *params: action[0](mode, user, channel, *params))(action, mode, channel), action[1]))
					modeUserActionName = "modeactioncheck-user-{}-{}".format(mode, actionName)
					modeUserActionNameWithChannel = "modeactioncheck-user-withchannel-{}-{}".format(mode, actionName)
					for action in self.actions.get(modeUserActionNameWithChannel, []):
						for channel in channels:
							actionList.append(((lambda action, channel: lambda user, *params: action[0](user, channel, *params))(action, channel), action[1]))
					actionList = sorted(self.actions.get(modeUserActionName, []) + actionList, key=lambda action: action[1], reverse=True)
					applyUsers = []
					for user in users:
						for action in actionList:
							param = action[0](user, *params)
							if param is not None:
								if param is not False:
									applyUsers.append((user, param))
								break
					for user, param in applyUsers:
						functionList.append(((lambda modeObj, actionName, user, param: lambda *params: modeObj.apply(actionName, user, param, *params))(modeObj, actionName, user, param), priority))
		
		if channels:
			genericChannelActionName = "modeactioncheck-channel-{}".format(actionName)
			genericChannelActionNameWithUser = "******".format(actionName)
			for modeType in self.channelModes:
				for mode, modeObj in modeType.iteritems():
					if actionName not in modeObj.affectedActions:
						continue
					priority = modeObj.affectedActions[actionName]
					actionList = []
					for action in self.actions.get("modeactioncheck-channel", []):
						actionList.append(((lambda action, actionName, mode: lambda channel, *params: action[0](actionName, mode, channel, *params))(action, actionName, mode), action[1]))
					for action in self.actions.get("modeactioncheck-channel-withuser", []):
						for user in users:
							actionList.append(((lambda action, actionName, mode, user: lambda channel, *params: action[0](actionName, mode, channel, user, *params))(action, actionName, mode, user), action[1]))
					for action in self.actions.get(genericChannelActionName, []):
						actionList.append(((lambda action, mode: lambda channel, *params: action[0](mode, channel, *params))(action, mode), action[1]))
					for action in self.actions.get(genericChannelActionNameWithUser, []):
						for user in users:
							actionList.append(((lambda action, mode, user: lambda channel, *params: action[0](mode, channel, user, *params))(action, mode, user), action[1]))
					modeChannelActionName = "modeactioncheck-channel-{}-{}".format(mode, actionName)
					modeChannelActionNameWithUser = "******".format(mode, actionName)
					for action in self.actions.get(modeChannelActionNameWithUser, []):
						for user in users:
							actionList.append(((lambda action, user: lambda channel, *params: action[0](channel, user, *params))(action, user), action[1]))
					actionList = sorted(self.actions.get(modeChannelActionName, []) + actionList, key=lambda action: action[1], reverse=True)
					applyChannels = []
					for channel in channels:
						for action in actionList:
							param = action[0](channel, *params)
							if param is not None:
								if param is not False:
									applyChannels.append((channel, param))
								break
					for channel, param in applyChannels:
						functionList.append(((lambda modeObj, actionName, channel, param: lambda *params: modeObj.apply(actionName, channel, param, *params))(modeObj, actionName, channel, param), priority))
		return functionList
	
	def _getActionFunctionList(self, actionName, *params, **kw):
		functionList = self.actions.get(actionName, [])
		functionList = functionList + self._getActionModes(actionName, *params, **kw)
		return sorted(functionList, key=lambda action: action[1], reverse=True)
	
	def _combineActionFunctionLists(self, actionLists):
		"""
		Combines multiple lists of action functions into one.
		Assumes all lists are sorted.
		Takes a dict mapping action names to their action function lists.
		Returns a list in priority order (highest to lowest) of (actionName, function) tuples.
		"""
		fullActionList = []
		for actionName, actionList in actionLists.iteritems():
			insertPos = 0
			for action in actionList:
				try:
					while fullActionList[insertPos][1] > action[1]:
						insertPos += 1
					fullActionList.insert(insertPos, (actionName, action[0]))
				except IndexError:
					fullActionList.append((actionName, action[0]))
				insertPos += 1
		return fullActionList
	
	def runActionStandard(self, actionName, *params, **kw):
		"""
		Calls all functions for a given action with the given parameters in
		priority order. Accepts the 'users' and 'channels' keyword arguments to
		determine which mode handlers should be included.
		"""
		actionList = self._getActionFunctionList(actionName, *params, **kw)
		for action in actionList:
			action[0](*params)
	
	def runActionUntilTrue(self, actionName, *params, **kw):
		"""
		Calls functions for a given action with the given parameters in
		priority order until one of them returns a true value. Returns True
		when one of the functions returned True. Accepts the 'users' and
		'channels' keyword arguments to determine which mode handlers should be
		included.
		"""
		actionList = self._getActionFunctionList(actionName, *params, **kw)
		for action in actionList:
			if action[0](*params):
				return True
		return False
	
	def runActionUntilFalse(self, actionName, *params, **kw):
		"""
		Calls functions for a given action with the given parameters in
		priority order until one of them returns a false value. Returns True
		when one of the functions returned False. Accepts the 'users' and
		'channels' keyword arguments to determine which mode handlers should be
		included.
		"""
		actionList = self._getActionFunctionList(actionName, *params, **kw)
		for action in actionList:
			if not action[0](*params):
				return True
		return False
	
	def runActionUntilValue(self, actionName, *params, **kw):
		"""
		Calls functions for a given action with the given parameters in
		priority order until one of them returns a non-None value. Returns the
		value returned by the function that returned a non-None value. Accepts
		the 'users' and 'channels' keyword arguments to determine which mode
		handlers should be included.
		"""
		actionList = self._getActionFunctionList(actionName, *params, **kw)
		for action in actionList:
			value = action[0](*params)
			if value is not None:
				return value
		return None
	
	def runActionFlagTrue(self, actionName, *params, **kw):
		"""
		Calls all functions for a given action with the given parameters in
		priority order. Returns True when one of the functions returns a true
		value. Accepts the 'users' and 'channels' keyword arguments to
		determine which mode handlers should be included.
		"""
		oneIsTrue = False
		actionList = self._getActionFunctionList(actionName, *params, **kw)
		for action in actionList:
			if action[0](*params):
				oneIsTrue = True
		return oneIsTrue
	
	def runActionFlagFalse(self, actionName, *params, **kw):
		"""
		Calls all functions for a given action with the given parameters in
		priority order. Returns True when one of the functions returns a false
		value. Accepts the 'users' and 'channels' keyword arguments to
		determine which mode handlers should be included.
		"""
		oneIsFalse = False
		actionList = self._getActionFunctionList(actionName, *params, **kw)
		for action in actionList:
			if action[0](*params):
				oneIsFalse = True
		return oneIsFalse
	
	def runActionProcessing(self, actionName, data, *params, **kw):
		"""
		Calls functions for a given action with the given parameters in
		priority order until the provided data is all processed (the data
		parameter becomes empty). Accepts 'users' and 'channels' keyword
		arguments to determine which mode handlers should be included.
		"""
		actionList = self._getActionFunctionList(actionName, data, *params, **kw)
		for action in actionList:
			action[0](data, *params)
			if not data:
				return
	
	def runActionProcessingMultiple(self, actionName, dataList, *params, **kw):
		"""
		Calls functions for a given action with the given parameters in
		priority order until the provided data is all processed (all of the
		data structures in the dataList parameter become empty). Accepts
		'users' and 'channels' keyword arguments to determine which mode
		handlers should be included.
		"""
		paramList = list(dataList) + list(params)
		actionList = self._getActionFunctionList(actionName, *paramList, **kw)
		for action in actionList:
			action[0](*paramList)
			for data in dataList:
				if data:
					break
			else:
				return
	
	def runComboActionStandard(self, actionList, **kw):
		"""
		Calls all functions for the given actions with the given parameters in
		priority order. Actions are specified as a list of tuples:
		[ ("action1", param1, param2, ...), ("action2", param1, param2, ...) ]
		Accepts 'users' and 'channels' keyword arguments to determine which
		mode handlers should be included.
		"""
		actionFuncLists = {}
		actionParameters = {}
		for action in actionList:
			parameters = action[1:]
			actionParameters[action[0]] = parameters
			actionFuncLists[action[0]] = self._getActionFunctionList(action[0], *parameters, **kw)
		funcList = self._combineActionFunctionLists(actionFuncLists)
		for actionName, actionFunc in funcList:
			actionFunc(*actionParameters[actionName])
	
	def runComboActionUntilTrue(self, actionList, **kw):
		"""
		Calls functions for the given actions with the given parameters in
		priority order until one of the functions returns a true value. Actions
		are specified as a list of tuples:
		[ ("action1", param1, param2, ...), ("action2", param1, param2, ...) ]
		Returns True if one of the functions returned a true value. Accepts
		'users' and 'channels' keyword arguments to determine which mode
		handlers should be included.
		"""
		actionFuncLists = {}
		actionParameters = {}
		for action in actionList:
			parameters = action[1:]
			actionParameters[action[0]] = parameters
			actionFuncLists[action[0]] = self._getActionFunctionList(action[0], *parameters, **kw)
		funcList = self._combineActionFunctionLists(actionFuncLists)
		for actionName, actionFunc in funcList:
			if actionFunc(*actionParameters[actionName]):
				return True
		return False
	
	def runComboActionUntilFalse(self, actionList, **kw):
		"""
		Calls functions for the given actions with the given parameters in
		priority order until one of the functions returns a false value.
		Actions are specified as a list of tuples:
		[ ("action1", param1, param2, ...), ("action2", param1, param2, ...) ]
		Returns True if one of the functions returned a false value. Accepts
		'users' and 'channels' keyword arguments to determine which mode
		handlers should be included.
		"""
		actionFuncLists = {}
		actionParameters = {}
		for action in actionList:
			parameters = action[1:]
			actionParameters[action[0]] = parameters
			actionFuncLists[action[0]] = self._getActionFunctionList(action[0], *parameters, **kw)
		funcList = self._combineActionFunctionLists(actionFuncLists)
		for actionName, actionFunc in funcList:
			if not actionFunc(*actionParameters[actionName]):
				return True
		return False
	
	def runComboActionUntilValue(self, actionList, **kw):
		"""
		Calls functions for the given actions with the given parameters in
		priority order until one of the functions returns a non-None value.
		Actions are specified as a list of tuples:
		[ ("action1", param1, param2, ...), ("action2", param1, param2, ...) ]
		Returns the value returned by the function that returned a non-None
		value. Accepts 'users' and 'channels' keyword arguments to determine
		which mode handlers should be included.
		"""
		actionFuncLists = {}
		actionParameters = {}
		for action in actionList:
			parameters = action[1:]
			actionParameters[action[0]] = parameters
			actionFuncLists[action[0]] = self._getActionFunctionList(action[0], *parameters, **kw)
		funcList = self._combineActionFunctionLists(actionFuncLists)
		for actionName, actionFunc in funcList:
			value = actionFunc(*actionParameters[actionName])
			if value is not None:
				return value
		return None
	
	def runComboActionFlagTrue(self, actionList, **kw):
		"""
		Calls all functions for the given actions with the given parameters in
		priority order. Actions are specified as a list of tuples:
		[ ("action1", param1, param2, ...), ("action2", param1, param2, ...) ]
		Returns True if any of the functions called returned a true value.
		Accepts 'users' and 'channels' keyword arguments to determine which
		mode handlers should be included.
		"""
		actionFuncLists = {}
		actionParameters = {}
		for action in actionList:
			parameters = action[1:]
			actionParameters[action[0]] = parameters
			actionFuncLists[action[0]] = self._getActionFunctionList(action[0], *parameters, **kw)
		funcList = self._combineActionFunctionLists(actionFuncLists)
		oneIsTrue = False
		for actionName, actionFunc in funcList:
			if actionFunc(*actionParameters[actionName]):
				oneIsTrue = True
		return oneIsTrue
	
	def runComboActionFlagFalse(self, actionList, **kw):
		"""
		Calls all functions for the given actions with the given parameters in
		priority order. Actions are specified as a list of tuples:
		[ ("action1", param1, param2, ...), ("action2", param1, param2, ...) ]
		Returns True if any of the functions called returned a false value.
		Accepts 'users' and 'channels' keyword arguments to determine which
		mode handlers should be included.
		"""
		actionFuncLists = {}
		actionParameters = {}
		for action in actionList:
			parameters = action[1:]
			actionParameters[action[0]] = parameters
			actionFuncLists[action[0]] = self._getActionFunctionList(action[0], *parameters, **kw)
		funcList = self._combineActionFunctionLists(actionFuncLists)
		oneIsFalse = False
		for actionName, actionFunc in funcList:
			if not actionFunc(*actionParameters[actionName]):
				oneIsFalse = True
		return oneIsFalse
	
	def runComboActionProcessing(self, data, actionList, **kw):
		"""
		Calls functions for the given actions with the given parameters in
		priority order until the data given has been processed (the data
		parameter becomes empty). Actions are specified as a list of tuples:
		[ ("action1", param1, param2, ...), ("action2", param1, param2, ...) ]
		Accepts 'users' and 'channels' keyword arguments to determine which
		mode handlers should be included.
		"""
		actionFuncLists = {}
		actionParameters = {}
		for action in actionList:
			parameters = [data] + list(action[1:])
			actionParameters[action[0]] = parameters
			actionFuncLists[action[0]] = self._getActionFunctionList(action[0], *parameters, **kw)
		funcList = self._combineActionFunctionLists(actionFuncLists)
		for actionName, actionFunc in funcList:
			actionFunc(*actionParameters[actionName])
			if not data:
				break
	
	def runComboActionProcessingMultiple(self, dataList, actionList, **kw):
		"""
		Calls functions for the given actions with the given parameters in
		priority order until the data given has been processed (all the data
		items in the dataList parameter become empty). Actions are specified as
		a list of tuples:
		[ ("action1", param1, param2, ...), ("action2", param1, param2, ...) ]
		Accepts 'users' and 'channels' keyword arguments to determine which
		mode handlers should be included.
		"""
		actionFuncLists = {}
		actionParameters = {}
		for action in actionList:
			parameters = list(dataList) + list(action[1:])
			actionParameters[action[0]] = parameters
			actionFuncLists[action[0]] = self._getActionFunctionList(action[0], *parameters, **kw)
		funcList = self._combineActionFunctionLists(actionFuncLists)
		for actionName, actionFunc in funcList:
			actionFunc(*actionParameters[actionName])
			for data in dataList:
				if data:
					break
			else:
				return
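
# A minimal, self-contained sketch of the priority-ordered dispatch the
# runAction* methods above implement. The registry layout -- lists of
# (function, priority) tuples keyed by action name -- mirrors self.actions;
# the handler names below are illustrative, not part of txircd's API.
def run_action_until_true(actions, action_name, *params):
	"""Call handlers for action_name in descending priority until one returns a true value."""
	handlers = sorted(actions.get(action_name, []), key=lambda entry: entry[1], reverse=True)
	for func, priority in handlers:
		if func(*params):
			return True
	return False

# Example: the priority-20 handler runs first and returns False, then the
# priority-10 handler succeeds.
# actions = {"commandpermission": [(lambda user: user == "oper", 10),
#                                  (lambda user: False, 20)]}
# run_action_until_true(actions, "commandpermission", "oper")  # -> True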
Exemple #43
0
        """
        Показывает уведомление вверху экрана.
        """

        self.notifications_mgr.notify(text)

    def toggle_chat(self, button):
        self.chat.toggle()


class StdoutHook():
    """
    Дублирует stdout в окно чата.
    """

    def __init__(self, chat):
        self.ex_stdout = sys.stdout # in case there's already a hook installed by someone
        sys.stdout = self
        self.chat = chat

    def write(self, s):
        s = s.strip()
        if s:
            self.ex_stdout.write(s)
            self.chat.text += 'STDOUT> ' + s + '\n'
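
# Hypothetical usage, assuming `chat` is a widget with a writable `text` property:
#     hook = StdoutHook(chat)
#     print('hello')  # still reaches the real stdout, and shows as "STDOUT> hello" in chat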


if __name__ == '__main__':
    log.info('Start client')
    TwistedClientApp().run()