Example 1
class Poll(object):
    """
    Poll the Telepresence client
    - Log periodically so `kubectl logs` doesn't go idle
    - Generate traffic so `kubectl port-forward` doesn't go idle
    """

    def __init__(self, reactor):
        self.reactor = reactor
        self.log = Logger("Poll")
        pool = HTTPConnectionPool(reactor)
        pool._factory = QuietHTTP11ClientFactory
        self.agent = Agent(reactor, connectTimeout=10.0, pool=pool)

    def periodic(self):
        """Periodically query the client"""
        deferred = self.agent.request(b"HEAD", b"http://localhost:9055/")
        deferred.addCallback(self.success)
        deferred.addErrback(self.failure)

    def success(self, response):
        """Client is still there"""
        if response.code == 200:
            self.log.info("Checkpoint")
        else:
            self.log.warn("Client returned code {}".format(response.code))

    def failure(self, failure):
        """Client is not there"""
        self.log.error("Failed to contact Telepresence client:")
        self.log.error(failure.getErrorMessage())
        self.log.warn("Perhaps it's time to exit?")
Example 2
    def test_log_backpressure_info(self):
        """
        When backpressure is hit, DEBUG and INFO logs will be shed.
        """
        log_config = {
            "loggers": {
                "synapse": {
                    "level": "DEBUG"
                }
            },
            "drains": {
                "tersejson": {
                    "type": "network_json_terse",
                    "host": "127.0.0.1",
                    "port": 8000,
                    "maximum_buffer": 10,
                }
            },
        }

        # Begin the logger with our config
        beginner = FakeBeginner()
        setup_structured_logging(
            self.hs,
            self.hs.config,
            log_config,
            logBeginner=beginner,
            redirect_stdlib_logging=False,
        )

        logger = Logger(namespace="synapse.logging.test_terse_json",
                        observer=beginner.observers[0])

        # Send some debug messages
        for i in range(0, 3):
            logger.debug("debug %s" % (i, ))

        # Send a bunch of useful messages
        for i in range(0, 10):
            logger.warn("test warn %s" % (i, ))

        # Send a bunch of info messages
        for i in range(0, 3):
            logger.info("test message %s" % (i, ))

        # The last debug message pushes it past the maximum buffer
        logger.debug("too much debug")

        # Allow the reconnection
        client, server = connect_client(self.reactor, 0)
        self.pump()

        # The 10 warnings made it through, the debugs and infos were elided
        logs = list(map(json.loads, server.data.decode("utf8").splitlines()))
        self.assertEqual(len(logs), 10)

        self.assertEqual(Counter([x["level"] for x in logs]), {"WARN": 10})
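
For intuition, a minimal sketch (not Synapse's actual drain code) of the shedding policy this test exercises: once the buffer exceeds maximum_buffer, DEBUG and INFO events are dropped first.

def shed_low_priority(events, maximum_buffer):
    """Drop DEBUG/INFO events first when a bounded buffer overflows."""
    if len(events) <= maximum_buffer:
        return events
    kept = [e for e in events if e["level"] not in ("DEBUG", "INFO")]
    return kept[:maximum_buffer]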
Example 3
    def test_log_backpressure_cut_middle(self):
        """
        When backpressure is hit and no more DEBUGs and INFOs can be culled,
        it will cut the middle messages out.
        """
        log_config = {
            "loggers": {
                "synapse": {
                    "level": "DEBUG"
                }
            },
            "drains": {
                "tersejson": {
                    "type": "network_json_terse",
                    "host": "127.0.0.1",
                    "port": 8000,
                    "maximum_buffer": 10,
                }
            },
        }

        # Begin the logger with our config
        beginner = FakeBeginner()
        setup_structured_logging(
            self.hs,
            self.hs.config,
            log_config,
            logBeginner=beginner,
            redirect_stdlib_logging=False,
        )

        logger = Logger(namespace="synapse.logging.test_terse_json",
                        observer=beginner.observers[0])

        # Send a bunch of useful messages
        for i in range(0, 20):
            logger.warn("test warn", num=i)

        # Allow the reconnection
        client, server = connect_client(self.reactor, 0)
        self.pump()

        # The first five and last five warnings made it through; the middle
        # ten were cut out
        logs = list(map(json.loads, server.data.decode("utf8").splitlines()))
        self.assertEqual(len(logs), 10)
        self.assertEqual(Counter([x["level"] for x in logs]), {"WARN": 10})
        self.assertEqual([0, 1, 2, 3, 4, 15, 16, 17, 18, 19],
                         [x["num"] for x in logs])
Example 4
        def __attempt(round=1, interval=10) -> Ursula:
            if round > 3:
                raise ConnectionRefusedError("Host {} Refused Connection".format(teacher_uri))

            try:
                teacher = cls.from_seed_and_stake_info(seed_uri='{host}:{port}'.format(host=hostname, port=port),
                                                       federated_only=federated_only,
                                                       checksum_address=checksum_address,
                                                       minimum_stake=min_stake,
                                                       network_middleware=network_middleware)

            except NodeSeemsToBeDown:
                log = Logger(cls.__name__)
                log.warn("Can't connect to seed node (attempt {}).  Will retry in {} seconds.".format(round, interval))
                time.sleep(interval)
                return __attempt(round=round + 1, interval=interval)
            else:
                return teacher
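
A hypothetical call site for the helper above; the first call starts at round 1 and the helper retries itself before giving up:

teacher = __attempt()  # retries up to 3 rounds, sleeping `interval` seconds in between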
Example 5
class SolidityCompiler:

    __default_contract_version = 'v0.0.0'
    __default_contract_dir = os.path.join(dirname(abspath(__file__)), 'source')

    __compiled_contracts_dir = 'contracts'
    __zeppelin_library_dir = 'zeppelin'
    __aragon_library_dir = 'aragon'

    optimization_runs = 200

    class CompilerError(Exception):
        pass

    class VersionError(Exception):
        pass

    @classmethod
    def default_contract_dir(cls):
        return cls.__default_contract_dir

    def __init__(self,
                 source_dirs: List[SourceDirs] = None,
                 ignore_solidity_check: bool = False) -> None:

        # Allow for optional installation
        from solcx.install import get_executable

        self.log = Logger('solidity-compiler')

        version = SOLIDITY_COMPILER_VERSION if not ignore_solidity_check else None
        self.__sol_binary_path = get_executable(version=version)

        if source_dirs is None or len(source_dirs) == 0:
            self.source_dirs = [
                SourceDirs(root_source_dir=self.__default_contract_dir)
            ]
        else:
            self.source_dirs = source_dirs

    def compile(self) -> dict:
        interfaces = dict()
        for root_source_dir, other_source_dirs in self.source_dirs:
            if root_source_dir is None:
                self.log.warn("One of the root directories is None")
                continue

            raw_interfaces = self._compile(root_source_dir, other_source_dirs)
            for name, data in raw_interfaces.items():
                # Extract contract version from docs
                version_search = re.search(
                    r"""
                
                \"details\":  # @dev tag in contract docs
                \".*?         # Skip any data in the beginning of details
                \|            # Beginning of version definition |
                (v            # Capture version starting from symbol v
                \d+           # At least one digit of major version
                \.            # Digits splitter
                \d+           # At least one digit of minor version
                \.            # Digits splitter
                \d+           # At least one digit of patch
                )             # End of capturing
                \|            # End of version definition |
                .*?\"         # Skip any data in the end of details
                
                """, data['devdoc'], re.VERBOSE)
                version = (version_search.group(1) if version_search
                           else self.__default_contract_version)
                try:
                    existence_data = interfaces[name]
                except KeyError:
                    existence_data = dict()
                    interfaces.update({name: existence_data})
                if version not in existence_data:
                    existence_data.update({version: data})
        return interfaces

    def _compile(self, root_source_dir: str, other_source_dirs: List[str]) -> dict:
        """Executes the compiler with parameters specified in the json config"""

        # Allow for optional installation
        from solcx import compile_files
        from solcx.exceptions import SolcError

        self.log.info("Using solidity compiler binary at {}".format(
            self.__sol_binary_path))
        contracts_dir = os.path.join(root_source_dir,
                                     self.__compiled_contracts_dir)
        self.log.info(
            "Compiling solidity source files at {}".format(contracts_dir))

        source_paths = set()
        source_walker = os.walk(top=contracts_dir, topdown=True)
        if other_source_dirs is not None:
            for source_dir in other_source_dirs:
                other_source_walker = os.walk(top=source_dir, topdown=True)
                source_walker = itertools.chain(source_walker,
                                                other_source_walker)

        for root, dirs, files in source_walker:
            for filename in files:
                if filename.endswith('.sol'):
                    path = os.path.join(root, filename)
                    source_paths.add(path)
                    self.log.debug(
                        "Collecting solidity source {}".format(path))

        # Compile with remappings: https://github.com/ethereum/py-solc
        zeppelin_dir = os.path.join(root_source_dir,
                                    self.__zeppelin_library_dir)
        aragon_dir = os.path.join(root_source_dir, self.__aragon_library_dir)

        remappings = (
            "contracts={}".format(contracts_dir),
            "zeppelin={}".format(zeppelin_dir),
            "aragon={}".format(aragon_dir),
        )

        self.log.info("Compiling with import remappings {}".format(
            ", ".join(remappings)))

        optimization_runs = self.optimization_runs

        try:
            compiled_sol = compile_files(source_files=source_paths,
                                         solc_binary=self.__sol_binary_path,
                                         import_remappings=remappings,
                                         allow_paths=root_source_dir,
                                         optimize=True,
                                         optimize_runs=optimization_runs)

            self.log.info(
                "Successfully compiled {} contracts with {} optimization runs".
                format(len(compiled_sol), optimization_runs))

        except FileNotFoundError:
            raise RuntimeError(
                "The solidity compiler is not at the specified path. "
                "Check that the file exists and is executable.")
        except PermissionError:
            raise RuntimeError(
                "The solidity compiler binary at {} is not executable. "
                "Check the file's permissions.".format(self.__sol_binary_path))

        except SolcError:
            raise

        # Cleanup the compiled data keys
        interfaces = {
            name.split(':')[-1]: compiled_sol[name]
            for name in compiled_sol
        }
        return interfaces
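
To see the version-extraction regex from compile() in action, here is a simplified, self-contained check (the devdoc payload is hypothetical, and the pattern is a condensed form of the verbose one above):

import re

sample_devdoc = '{"details": "Staking escrow contract |v1.2.3| main logic"}'  # hypothetical
match = re.search(r"\|(v\d+\.\d+\.\d+)\|", sample_devdoc)
assert match is not None and match.group(1) == "v1.2.3"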
Example 6
class WorkTracker:

    CLOCK = reactor
    REFRESH_RATE = 60 * 15  # Fifteen minutes

    def __init__(self, worker, refresh_rate: int = None, *args, **kwargs):

        super().__init__(*args, **kwargs)
        self.log = Logger('stake-tracker')

        self.worker = worker
        self.staking_agent = self.worker.staking_agent

        self._refresh_rate = refresh_rate or self.REFRESH_RATE
        self._tracking_task = task.LoopingCall(self._do_work)
        self._tracking_task.clock = self.CLOCK

        self.__current_period = None
        self.__start_time = NOT_STAKING
        self.__uptime_period = NOT_STAKING
        self._abort_on_error = True

    @property
    def current_period(self):
        return self.__current_period

    def stop(self) -> None:
        self._tracking_task.stop()
        self.log.info(f"STOPPED WORK TRACKING")

    def start(self, act_now: bool = False, force: bool = False) -> None:
        """
        High-level stake tracking initialization, this function aims
        to be safely called at any time - For example, it is okay to call
        this function multiple times within the same period.
        """
        if self._tracking_task.running and not force:
            return

        # Record the start time and period
        self.__start_time = maya.now()
        self.__uptime_period = self.staking_agent.get_current_period()
        self.__current_period = self.__uptime_period

        d = self._tracking_task.start(interval=self._refresh_rate)
        d.addErrback(self.handle_working_errors)
        self.log.info(f"STARTED WORK TRACKING")

        if act_now:
            self._do_work()

    def _crash_gracefully(self, failure=None) -> None:
        """
        A facility for crashing more gracefully in the event that
        an exception is unhandled in a different thread.
        """
        self._crashed = failure
        failure.raiseException()

    def handle_working_errors(self, *args, **kwargs) -> None:
        failure = args[0]
        if self._abort_on_error:
            self.log.critical(
                f"Unhandled error during node work tracking. {failure}")
            reactor.callFromThread(self._crash_gracefully, failure=failure)
        else:
            self.log.warn(
                f"Unhandled error during work tracking: {failure.getTraceback()}"
            )

    def _do_work(self) -> None:
        # TODO: Check for stake expiration and exit
        # TODO: Follow-up actions for downtime

        # Update on-chain status
        self.log.info(
            f"Checking for new period. Current period is {self.__current_period}"
        )
        onchain_period = self.staking_agent.get_current_period()  # <-- read from contract
        if self.current_period != onchain_period:
            self.__current_period = onchain_period
            # self.worker.stakes.refresh()  # TODO: Track stakes

        # Measure working interval
        interval = onchain_period - self.worker.last_active_period
        if interval < 0:
            return  # No need to confirm this period.  Save the gas.
        if interval > 0:
            self.log.warn(
                f"MISSED CONFIRMATIONS - {interval} missed staking confirmations detected."
            )

        # Confirm Activity
        self.log.info("Confirmed activity for period {}".format(
            self.current_period))
        transacting_power = self.worker.transacting_power
        with transacting_power:
            self.worker.confirm_activity()  # <-- blockchain WRITE
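
A hypothetical usage sketch; `worker` is assumed to expose `staking_agent`, `last_active_period`, `transacting_power`, and `confirm_activity()`, as the class above requires:

tracker = WorkTracker(worker=worker, refresh_rate=60 * 5)  # illustrative 5-minute polls
tracker.start(act_now=True)  # confirm immediately instead of waiting for the first tick
# ... later, on shutdown:
tracker.stop()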
Example 7

class FakeSerialPort(object):
    def __init__(self, protocol):
        log.warn("Starting the FakeSerialPort")
        self.protocol = protocol
        self.protocol.transport = self

    def write(self, data):
        log.info("FAKE WRITING #%d bytes" % len(data))


if __name__ == '__main__':
    log = Logger(__file__)
    config = Config(envvar_silent=False)
    scheduler = CreateService(Scheduler)
    scheduler.add_intermezzo(IntermezzoWipe)
    scheduler.add_intermezzo(IntermezzoInvaders)
    scheduler.add_intermezzo(IntermezzoPacman)
    led_screen = LEDScreen()
    serial_port = config.get('SERIAL_PORT')
    if serial_port == 'fake':
        log.warn("FAKE SERIAL SELECTED.")
        FakeSerialPort(led_screen)
    else:
        baudrate = config.get('SERIAL_BAUDRATE')
        log.info("REAL Serialport %s @ %s" % (serial_port, baudrate))
        RealSerialPort(led_screen, serial_port, reactor, baudrate=baudrate)
    scheduler.led_screen = led_screen
    reactor.run()
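
The fake transport makes the protocol's writes observable without hardware; a minimal illustration under the same setup:

# After FakeSerialPort(led_screen), the protocol's transport is the fake:
led_screen.transport.write(b"\x00" * 16)  # logs "FAKE WRITING #16 bytes"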
Example 8
def ursula(config, action, rest_port, rest_host, db_name, checksum_address,
           debug, teacher_uri, min_stake) -> None:
    """
    Manage and run an Ursula node

    Here is the procedure to "spin-up" an Ursula node.

    \b
        0. Validate CLI Input
        1. Initialize UrsulaConfiguration (from configuration file or inline)
        2. Initialize Ursula with Passphrase
        3. Initialize Staking Loop
        4. Run TLS deployment (Learning Loop + Reactor)

    """
    log = Logger("ursula/launch")

    password = os.environ.get(config._KEYRING_PASSPHRASE_ENVVAR, None)
    if not password:
        password = click.prompt("Password to unlock Ursula's keyring",
                                hide_input=True)

    def __make_ursula():
        if not checksum_address and not config.dev:
            raise click.BadOptionUsage(
                message="No account specified. Pass --checksum-address, --dev, "
                        "or use a configuration file with --config-file <path>")

        return UrsulaConfiguration(temp=config.dev,
                                   auto_initialize=config.dev,
                                   is_me=True,
                                   rest_host=rest_host,
                                   rest_port=rest_port,
                                   db_name=db_name,
                                   federated_only=config.federated_only,
                                   registry_filepath=config.registry_filepath,
                                   provider_uri=config.provider_uri,
                                   checksum_address=checksum_address,
                                   poa=config.poa,
                                   save_metadata=False,
                                   load_metadata=True,
                                   start_learning_now=True,
                                   learn_on_same_thread=False,
                                   abort_on_learning_error=config.dev)

    #
    # Configure
    #
    overrides = dict()
    if config.dev:
        ursula_config = __make_ursula()
    else:
        try:
            filepath = config.config_file or UrsulaConfiguration.DEFAULT_CONFIG_FILE_LOCATION
            click.secho(
                "Reading Ursula node configuration file {}".format(filepath),
                fg='blue')
            ursula_config = UrsulaConfiguration.from_configuration_file(
                filepath=filepath)
        except FileNotFoundError:
            ursula_config = __make_ursula()

    config.operating_mode = "federated" if ursula_config.federated_only else "decentralized"
    click.secho("Running in {} mode".format(config.operating_mode), fg='blue')

    #
    # Seed
    #
    teacher_nodes = list()
    if teacher_uri:

        if '@' in teacher_uri:
            checksum_address, teacher_uri = teacher_uri.split("@")
            if not is_checksum_address(checksum_address):
                raise click.BadParameter(
                    "{} is not a valid checksum address.".format(
                        checksum_address))
        else:
            checksum_address = None  # federated

        # HTTPS Explicit Required
        parsed_teacher_uri = urlparse(teacher_uri)
        if not parsed_teacher_uri.scheme == "https":
            raise click.BadParameter(
                "Invalid teacher URI. Is the hostname prefixed with 'https://' ?"
            )

        port = parsed_teacher_uri.port or UrsulaConfiguration.DEFAULT_REST_PORT
        while not teacher_nodes:
            try:
                teacher = Ursula.from_seed_and_stake_info(
                    host=parsed_teacher_uri.hostname,
                    port=port,
                    federated_only=ursula_config.federated_only,
                    checksum_address=checksum_address,
                    minimum_stake=min_stake,
                    certificates_directory=ursula_config.known_certificates_dir
                )
                teacher_nodes.append(teacher)
            except (socket.gaierror, requests.exceptions.ConnectionError,
                    ConnectionRefusedError):
                log.warn("Can't connect to seed node.  Will retry.")
                time.sleep(5)

    #
    # Produce
    #
    try:
        URSULA = ursula_config.produce(passphrase=password,
                                       known_nodes=teacher_nodes,
                                       **overrides)  # 2
    except CryptoError:
        click.secho("Invalid keyring passphrase")
        return

    click.secho("Initialized Ursula {}".format(URSULA), fg='green')

    #
    # Run
    #
    if action == 'run':
        try:

            # GO!
            click.secho("Running Ursula on {}".format(URSULA.rest_interface),
                        fg='green',
                        bold=True)
            stdio.StandardIO(UrsulaCommandProtocol(ursula=URSULA))
            URSULA.get_deployer().run()

        except Exception as e:
            config.log.critical(str(e))
            click.secho("{} {}".format(e.__class__.__name__, str(e)), fg='red')
            if debug: raise
            raise click.Abort()
        finally:
            click.secho("Stopping Ursula")
            ursula_config.cleanup()
            click.secho("Ursula Stopped", fg='red')

    elif action == "save-metadata":
        metadata_path = URSULA.write_node_metadata(node=URSULA)
        click.secho(
            "Successfully saved node metadata to {}.".format(metadata_path),
            fg='green')

    else:
        raise click.BadArgumentUsage("Unknown action: {}".format(action))
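
For reference, a sketch of how a <checksum>@<https-url> teacher URI is taken apart by the code above (illustrative values; an all-zero address is trivially its own checksum):

from urllib.parse import urlparse

uri = "0x0000000000000000000000000000000000000000@https://seed.example.com:9151"
checksum_address, teacher_uri = uri.split("@")
parsed = urlparse(teacher_uri)
# parsed.scheme == "https", parsed.hostname == "seed.example.com", parsed.port == 9151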
Example 9
class OVInfoContent(GenericContent):
    def __init__(self, endpoint, factory):
        self.log = Logger(self.__class__.__name__)
        super().__init__(endpoint, factory)
        self.update_task = None
        self.publish_task = None
        self.urls = CircularBuffer(self.config['OVINFO_STOPAREA_URLS'])
        self.lines = Transports()

    def onBrokerConnected(self):
        self.update_task = task.LoopingCall(self.update_ov_info)
        # update_delay_time = float(60) / len(self.urls)
        update_delay_time = float(self.config['OVINFO_UPDATE_FREQ']) / len(self.urls)
        self.update_task.start(update_delay_time, now=True)
        self.publish_task = task.LoopingCall(self.publish_ov_info)
        self.publish_task.start(self.config['OVINFO_PUBLISH_FREQ'], now=True)
        # self.publish_task.start(15, now=True)

    def _logFailure(self, failure):
        self.log.debug("reported failure: {message}", message=failure.getErrorMessage())
        return failure

    def update_ov_info(self):
        d = treq.get(next(self.urls))
        d.addCallbacks(self.grab_http_response, self._logFailure)
        d.addCallbacks(self.parse_json_page, self._logFailure)
        d.addCallbacks(self.update_departure_info, self._logFailure)

    def publish_ov_info(self):
        lines = self.create_ov_display()
        return self.publish_ov_display(lines)

    def now(self) -> datetime:
        return datetime.now()

    def time_formatter(self, dt: datetime, now=None):
        now = self.now() if now is None else now
        t_diff = (dt - now).seconds  # time difference in seconds.
        if t_diff < 30*60:  # difference smaller than 30 minutes:
            return "{}m".format(round(t_diff/60))
        elif t_diff < 60*60:  # difference smaller than an hour
            return ":{:02d}".format(dt.minute)
        else:
            return "{:02d}:{:02d}".format(dt.hour, dt.minute)

    def create_ov_display(self):
        lines = []
        for line_nr, dest_code, passes in self.lines.pass_overview():
            if dest_code in DestinationCode_ignore:
                continue
            dest_name = DestinationCode_to_name.get(dest_code, dest_code)
            formatted_passes = " ".join(map(self.time_formatter, passes))
            display_str = "{:>3}{} {}".format(line_nr, dest_name, formatted_passes)
            lines.append(display_str)
        return lines

    def grab_http_response(self, response):
        if response.code != 200:
            raise RuntimeError("Status is not 200 but '%s'" % response.code)
        return readBody(response)

    def parse_json_page(self, content):
        prices = json.loads(content.decode())
        return prices

    def update_departure_info(self, data):
        dparser = date_parser()
        for trans in [x.value for x in JsonPathParser().parse('$..Passes.*').find(data)]:
            expectedTime = dparser.parse(trans['ExpectedArrivalTime'])
            destination_code = trans['DestinationCode']
            self.lines.add_pass(trans['LinePublicNumber'], destination_code,
                                trans['JourneyNumber'], expectedTime)
            if (destination_code not in DestinationCode_to_name
                    and destination_code not in DestinationCode_ignore):
                self.log.warn("Missing DestinationCode: %s = %s" % (destination_code, trans['DestinationName50']))
            # 'LinePublicNumber'  -- '2'
            # 'JourneyNumber' -- 8,
            # 'DestinationCode' -- 'NSN'
            # 'DestinationName50' --  'Nieuw Sloten'
            # 'ExpectedArrivalTime' -- '2017-12-17T00:35:15'

    def publish_ov_display(self, info_lines: list) -> Deferred:
        def _logAll(*args):
            self.log.debug("all publishing complete args={args!r}", args=args)
        if not info_lines:
            return
        msg = TextTripleLinesLayout()
        msg.lines = info_lines
        msg.line_duration = self.config["OVINFO_LINE_DELAY"]
        msg.valid_time = 60  # Information is only valid for a minute.
        msg.program = 'ovinfo'
        msg.size = '6x7'
        d = self.publish(topic=LEDSLIE_TOPIC_TYPESETTER_3LINES, message=msg, qos=1)
        d.addCallbacks(_logAll, self._logFailure)
        return d
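
Worked examples of time_formatter's three ranges, with `content` as a hypothetical OVInfoContent instance and `now` pinned for determinism:

from datetime import datetime, timedelta

now = datetime(2017, 12, 17, 0, 0)
assert content.time_formatter(now + timedelta(minutes=12), now=now) == "12m"   # under 30 minutes
assert content.time_formatter(now + timedelta(minutes=45), now=now) == ":45"   # under an hour
assert content.time_formatter(now + timedelta(hours=2), now=now) == "02:00"    # anything later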
Example 10
class Learner:
    """
    Any participant in the "learning loop" - a class inheriting from
    this one has the ability, synchronously or asynchronously,
    to learn about nodes in the network, verify some essential
    details about them, and store information about them for later use.
    """

    _SHORT_LEARNING_DELAY = 5
    _LONG_LEARNING_DELAY = 90
    LEARNING_TIMEOUT = 10
    _ROUNDS_WITHOUT_NODES_AFTER_WHICH_TO_SLOW_DOWN = 10

    # For Keeps
    __DEFAULT_NODE_STORAGE = ForgetfulNodeStorage
    __DEFAULT_MIDDLEWARE_CLASS = RestMiddleware

    LEARNER_VERSION = LEARNING_LOOP_VERSION
    node_splitter = BytestringSplitter(VariableLengthBytestring)
    version_splitter = BytestringSplitter((int, 2, {"byteorder": "big"}))
    tracker_class = FleetStateTracker

    invalid_metadata_message = "{} has invalid metadata.  Maybe its stake is over?  Or maybe it is transitioning to a new interface.  Ignoring."
    unknown_version_message = "{} purported to be of version {}, but we're only version {}.  Is there a new version of NuCypher?"
    really_unknown_version_message = "Unable to glean address from node that perhaps purported to be version {}.  We're only version {}."
    fleet_state_icon = ""

    class NotEnoughNodes(RuntimeError):
        pass

    class NotEnoughTeachers(NotEnoughNodes):
        pass

    class UnresponsiveTeacher(ConnectionError):
        pass

    class NotATeacher(ValueError):
        """
        Raised when a character cannot be properly utilized because
        it does not have the proper attributes for learning or verification.
        """

    def __init__(
        self,
        domains: Set,
        network_middleware: RestMiddleware = __DEFAULT_MIDDLEWARE_CLASS(),
        start_learning_now: bool = False,
        learn_on_same_thread: bool = False,
        known_nodes: tuple = None,
        seed_nodes: Tuple[tuple] = None,
        node_storage=None,
        save_metadata: bool = False,
        abort_on_learning_error: bool = False,
        lonely: bool = False,
    ) -> None:

        self.log = Logger("learning-loop")  # type: Logger

        self.learning_domains = domains
        self.network_middleware = network_middleware
        self.save_metadata = save_metadata
        self.start_learning_now = start_learning_now
        self.learn_on_same_thread = learn_on_same_thread

        self._abort_on_learning_error = abort_on_learning_error
        self._learning_listeners = defaultdict(list)
        self._node_ids_to_learn_about_immediately = set()

        self.__known_nodes = self.tracker_class()

        self.lonely = lonely
        self.done_seeding = False

        # Read
        if node_storage is None:
            node_storage = self.__DEFAULT_NODE_STORAGE(
                federated_only=self.federated_only,
                # TODO: remove federated_only
                character_class=self.__class__)

        self.node_storage = node_storage
        if save_metadata and node_storage is NO_STORAGE_AVAILIBLE:
            raise ValueError(
                "Cannot save nodes without a configured node storage")

        known_nodes = known_nodes or tuple()
        self.unresponsive_startup_nodes = list()  # TODO: Attempt to use these again later
        for node in known_nodes:
            try:
                self.remember_node(
                    node
                )  # TODO: Need to test this better - do we ever init an Ursula-Learner with Node Storage?
            except self.UnresponsiveTeacher:
                self.unresponsive_startup_nodes.append(node)

        self.teacher_nodes = deque()
        self._current_teacher_node = None  # type: Teacher
        self._learning_task = task.LoopingCall(self.keep_learning_about_nodes)
        self._learning_round = 0  # type: int
        self._rounds_without_new_nodes = 0  # type: int
        self._seed_nodes = seed_nodes or []
        self.unresponsive_seed_nodes = set()

        if self.start_learning_now:
            self.start_learning_loop(now=self.learn_on_same_thread)

    @property
    def known_nodes(self):
        return self.__known_nodes

    def load_seednodes(self,
                       read_storages: bool = True,
                       retry_attempts: int = 3):  # TODO: why are these unused?
        """
        Engage known nodes from storages and pre-fetch hardcoded seednode certificates for node learning.
        """
        if self.done_seeding:
            self.log.debug("Already done seeding; won't try again.")
            return

        from nucypher.characters.lawful import Ursula
        for seednode_metadata in self._seed_nodes:
            self.log.debug("Seeding from: {}|{}:{}".format(
                seednode_metadata.checksum_public_address,
                seednode_metadata.rest_host, seednode_metadata.rest_port))

            seed_node = Ursula.from_seednode_metadata(
                seednode_metadata=seednode_metadata,
                network_middleware=self.network_middleware,
                federated_only=self.federated_only)  # TODO: 466
            if seed_node is False:
                self.unresponsive_seed_nodes.add(seednode_metadata)
            else:
                self.unresponsive_seed_nodes.discard(seednode_metadata)
                self.remember_node(seed_node)

        if not self.unresponsive_seed_nodes:
            self.log.info("Finished learning about all seednodes.")

        self.done_seeding = True

        if read_storages is True:
            self.read_nodes_from_storage()

        if not self.known_nodes:
            self.log.warn(
                "No seednodes were available after {} attempts".format(
                    retry_attempts))
            # TODO: Need some actual logic here for situation with no seed nodes (ie, maybe try again much later)

    def read_nodes_from_storage(self) -> set:
        stored_nodes = self.node_storage.all(
            federated_only=self.federated_only)  # TODO: 466
        for node in stored_nodes:
            self.remember_node(node)

    def remember_node(self,
                      node,
                      force_verification_check=False,
                      record_fleet_state=True):

        if node == self:  # No need to remember self.
            return False

        # First, determine if this is an outdated representation of an already known node.
        with suppress(KeyError):
            already_known_node = self.known_nodes[node.checksum_public_address]
            if not node.timestamp > already_known_node.timestamp:
                self.log.debug("Skipping already known node {}".format(
                    already_known_node))
                # This node is already known.  We can safely return.
                return False

        try:
            stranger_certificate = node.certificate
        except AttributeError:
            # Whoops, we got an Alice, Bob, or someone...
            raise self.NotATeacher(
                f"{node.__class__.__name__} does not have a certificate and cannot be remembered."
            )

        # Store node's certificate - It has been seen.
        certificate_filepath = self.node_storage.store_node_certificate(
            certificate=stranger_certificate)

        # In some cases (seed nodes or other temp stored certs),
        # this will update the filepath from the temp location to this one.
        node.certificate_filepath = certificate_filepath
        self.log.info(
            f"Saved TLS certificate for {node.nickname}: {certificate_filepath}"
        )

        try:
            node.verify_node(
                force=force_verification_check,
                network_middleware=self.network_middleware,
                accept_federated_only=self.federated_only,
                # TODO: 466 - move federated-only up to Learner?
            )
        except SSLError:
            return False  # TODO: Bucket this node as having bad TLS info - maybe it's an update that hasn't fully propagated?

        except NodeSeemsToBeDown:
            return False  # TODO: Bucket this node as "ghost" or something: somebody else knows about it, but we can't get to it.

        listeners = self._learning_listeners.pop(node.checksum_public_address,
                                                 tuple())
        address = node.checksum_public_address

        self.known_nodes[address] = node

        if self.save_metadata:
            self.node_storage.store_node_metadata(node=node)
        #self.log.info("Remembering {} ({}), popping {} listeners.".format(node.nickname, node.checksum_public_address, len(listeners)))
        for listener in listeners:
            listener.add(address)
        self._node_ids_to_learn_about_immediately.discard(address)

        if record_fleet_state:
            self.known_nodes.record_fleet_state()

        return node

    def start_learning_loop(self, now=False):
        if self._learning_task.running:
            return False
        elif now:
            self.log.info("Starting Learning Loop NOW.")

            if self.lonely:
                self.done_seeding = True
                self.read_nodes_from_storage()

            else:
                self.load_seednodes()

            self.learn_from_teacher_node()
            self.learning_deferred = self._learning_task.start(
                interval=self._SHORT_LEARNING_DELAY)
            self.learning_deferred.addErrback(self.handle_learning_errors)
            return self.learning_deferred
        else:
            self.log.info("Starting Learning Loop.")

            learning_deferreds = list()
            if not self.lonely:
                seeder_deferred = deferToThread(self.load_seednodes)
                seeder_deferred.addErrback(self.handle_learning_errors)
                learning_deferreds.append(seeder_deferred)

            learner_deferred = self._learning_task.start(
                interval=self._SHORT_LEARNING_DELAY, now=now)
            learner_deferred.addErrback(self.handle_learning_errors)
            learning_deferreds.append(learner_deferred)

            self.learning_deferred = defer.DeferredList(learning_deferreds)
            return self.learning_deferred

    def stop_learning_loop(self, reason=None):
        """
        Only for tests at this point.  Maybe some day for graceful shutdowns.
        """
        self._learning_task.stop()

    def handle_learning_errors(self, *args, **kwargs):
        failure = args[0]
        if self._abort_on_learning_error:
            self.log.critical(
                "Unhandled error during node learning.  Attempting graceful crash."
            )
            reactor.callFromThread(self._crash_gracefully, failure=failure)
        else:
            self.log.warn("Unhandled error during node learning: {}".format(
                failure.getTraceback()))
            if not self._learning_task.running:
                self.start_learning_loop()  # TODO: Consider a single entry point for this with more elegant pause and unpause.

    def _crash_gracefully(self, failure=None):
        """
        A facility for crashing more gracefully in the event that an exception
        is unhandled in a different thread, especially inside a loop like the learning loop.
        """
        self._crashed = failure
        failure.raiseException()
        # TODO: We don't actually have checksum_public_address at this level - maybe only Characters can crash gracefully :-)
        self.log.critical("{} crashed with {}".format(
            self.checksum_public_address, failure))

    def select_teacher_nodes(self):
        nodes_we_know_about = self.known_nodes.shuffled()

        if not nodes_we_know_about:
            raise self.NotEnoughTeachers(
                "Need some nodes to start learning from.")

        self.teacher_nodes.extend(nodes_we_know_about)

    def cycle_teacher_node(self):
        # To ensure that all the best teachers are available, first let's make sure
        # that we have connected to all the seed nodes.
        if self.unresponsive_seed_nodes and not self.lonely:
            self.log.info(
                "Still have unresponsive seed nodes; trying again to connect.")
            self.load_seednodes()  # Ideally, this is async and singular.

        if not self.teacher_nodes:
            self.select_teacher_nodes()
        try:
            self._current_teacher_node = self.teacher_nodes.pop()
        except IndexError:
            error = "Not enough nodes to select a good teacher, Check your network connection then node configuration"
            raise self.NotEnoughTeachers(error)
        self.log.info("Cycled teachers; New teacher is {}".format(
            self._current_teacher_node))

    def current_teacher_node(self, cycle=False):
        if cycle:
            self.cycle_teacher_node()

        if not self._current_teacher_node:
            self.cycle_teacher_node()

        teacher = self._current_teacher_node

        return teacher

    def learn_about_nodes_now(self, force=False):
        if self._learning_task.running:
            self._learning_task.reset()
            self._learning_task()
        elif not force:
            self.log.warn(
                "Learning loop isn't started; can't learn about nodes now.  You can override this with force=True."
            )
        elif force:
            self.log.info("Learning loop wasn't started; forcing start now.")
            self._learning_task.start(self._SHORT_LEARNING_DELAY, now=True)

    def keep_learning_about_nodes(self):
        """
        Continually learn about new nodes.
        """
        # TODO: Allow the user to set eagerness?
        self.learn_from_teacher_node(eager=False)

    def learn_about_specific_nodes(self, addresses: Set):
        self._node_ids_to_learn_about_immediately.update(addresses)  # hmmmm
        self.learn_about_nodes_now()

    # TODO: Dehydrate these next two methods.

    def block_until_number_of_known_nodes_is(
            self,
            number_of_nodes_to_know: int,
            timeout: int = 10,
            learn_on_this_thread: bool = False):
        start = maya.now()
        starting_round = self._learning_round

        while True:
            rounds_undertaken = self._learning_round - starting_round
            if len(self.__known_nodes) >= number_of_nodes_to_know:
                if rounds_undertaken:
                    self.log.info(
                        "Learned about enough nodes after {} rounds.".format(
                            rounds_undertaken))
                return True

            if not self._learning_task.running:
                self.log.warn(
                    "Blocking to learn about nodes, but learning loop isn't running."
                )
            if learn_on_this_thread:
                try:
                    self.learn_from_teacher_node(eager=True)
                except (requests.exceptions.ReadTimeout,
                        requests.exceptions.ConnectTimeout):
                    # TODO: Even this "same thread" logic can be done off the main thread.
                    self.log.warn(
                        "Teacher was unreachable.  No good way to handle this on the main thread."
                    )

            # The rest of the f*****g owl
            if (maya.now() - start).seconds > timeout:
                if not self._learning_task.running:
                    raise RuntimeError(
                        "Learning loop is not running.  Start it with start_learning()."
                    )
                else:
                    raise self.NotEnoughNodes(
                        "After {} seconds and {} rounds, didn't find {} nodes".
                        format(timeout, rounds_undertaken,
                               number_of_nodes_to_know))
            else:
                time.sleep(.1)

    def block_until_specific_nodes_are_known(self,
                                             addresses: Set,
                                             timeout=LEARNING_TIMEOUT,
                                             allow_missing=0,
                                             learn_on_this_thread=False):
        start = maya.now()
        starting_round = self._learning_round

        while True:
            if self._crashed:
                return self._crashed
            rounds_undertaken = self._learning_round - starting_round
            if addresses.issubset(self.known_nodes.addresses()):
                if rounds_undertaken:
                    self.log.info(
                        "Learned about all nodes after {} rounds.".format(
                            rounds_undertaken))
                return True

            if not self._learning_task.running:
                self.log.warn(
                    "Blocking to learn about nodes, but learning loop isn't running."
                )
            if learn_on_this_thread:
                self.learn_from_teacher_node(eager=True)

            if (maya.now() - start).seconds > timeout:

                still_unknown = addresses.difference(
                    self.known_nodes.addresses())

                if len(still_unknown) <= allow_missing:
                    return False
                elif not self._learning_task.running:
                    raise self.NotEnoughTeachers(
                        "The learning loop is not running.  Start it with start_learning()."
                    )
                else:
                    raise self.NotEnoughTeachers(
                        "After {} seconds and {} rounds, didn't find these {} nodes: {}"
                        .format(timeout, rounds_undertaken, len(still_unknown),
                                still_unknown))
            else:
                time.sleep(.1)

    def _adjust_learning(self, node_list):
        """
        Takes a list of new nodes, adjusts learning accordingly.

        Currently, simply slows down learning loop when no new nodes have been discovered in a while.
        TODO: Do other important things - scrub, bucket, etc.
        """
        if node_list:
            self._rounds_without_new_nodes = 0
            self._learning_task.interval = self._SHORT_LEARNING_DELAY
        else:
            self._rounds_without_new_nodes += 1
            if self._rounds_without_new_nodes > self._ROUNDS_WITHOUT_NODES_AFTER_WHICH_TO_SLOW_DOWN:
                self.log.info(
                    "After {} rounds with no new nodes, it's time to slow down to {} seconds."
                    .format(
                        self._ROUNDS_WITHOUT_NODES_AFTER_WHICH_TO_SLOW_DOWN,
                        self._LONG_LEARNING_DELAY))
                self._learning_task.interval = self._LONG_LEARNING_DELAY

    def _push_certain_newly_discovered_nodes_here(self, queue_to_push,
                                                  node_addresses):
        """
        If any node_addresses are discovered, push them to queue_to_push.
        """
        for node_address in node_addresses:
            self.log.info("Adding listener for {}".format(node_address))
            self._learning_listeners[node_address].append(queue_to_push)

    def network_bootstrap(self, node_list: list) -> None:
        for node_addr, port in node_list:
            new_nodes = self.learn_about_nodes_now(node_addr, port)
            self.__known_nodes.update(new_nodes)

    def get_nodes_by_ids(self, node_ids):
        for node_id in node_ids:
            try:
                # Scenario 1: We already know about this node.
                return self.__known_nodes[node_id]
            except KeyError:
                raise NotImplementedError
        # Scenario 2: We don't know about this node, but a nearby node does.
        # TODO: Build a concurrent pool of lookups here.

        # Scenario 3: We don't know about this node, and neither does our friend.

    def write_node_metadata(self, node, serializer=bytes) -> str:
        return self.node_storage.store_node_metadata(node=node)

    def learn_from_teacher_node(self, eager=True):
        """
        Sends a request to node_url to find out about known nodes.
        """
        self._learning_round += 1

        try:
            current_teacher = self.current_teacher_node()
        except self.NotEnoughTeachers as e:
            self.log.warn("Can't learn right now: {}".format(e.args[0]))
            return

        if Teacher in self.__class__.__bases__:
            announce_nodes = [self]
        else:
            announce_nodes = None

        unresponsive_nodes = set()
        try:
            # TODO: Streamline path generation
            certificate_filepath = self.node_storage.generate_certificate_filepath(
                checksum_address=current_teacher.checksum_public_address)
            response = self.network_middleware.get_nodes_via_rest(
                node=current_teacher,
                nodes_i_need=self._node_ids_to_learn_about_immediately,
                announce_nodes=announce_nodes,
                fleet_checksum=self.known_nodes.checksum)
        except NodeSeemsToBeDown as e:
            unresponsive_nodes.add(current_teacher)
            self.log.info("Bad Response from teacher: {}:{}.".format(
                current_teacher, e))
            return
        finally:
            self.cycle_teacher_node()

        #
        # Before we parse the response, let's handle some edge cases.
        if response.status_code == 204:
            # In this case, this node knows about no other nodes.  Hopefully we've taught it something.
            if response.content == b"":
                return NO_KNOWN_NODES
            # In the other case - where the status code is 204 but the response isn't blank - we'll keep parsing.
            # It's possible that our fleet states match, and we'll check for that later.

        elif response.status_code != 200:
            self.log.info("Bad response from teacher {}: {} - {}".format(
                current_teacher, response, response.content))
            return

        try:
            signature, node_payload = signature_splitter(response.content,
                                                         return_remainder=True)
        except BytestringSplittingError as e:
            self.log.warn(e.args[0])
            return

        try:
            self.verify_from(current_teacher,
                             node_payload,
                             signature=signature)
        except current_teacher.InvalidSignature:
            # TODO: What to do if the teacher improperly signed the node payload?
            raise
        # End edge case handling.
        #

        fleet_state_checksum_bytes, fleet_state_updated_bytes, node_payload = FleetStateTracker.snapshot_splitter(
            node_payload, return_remainder=True)
        current_teacher.last_seen = maya.now()
        # TODO: This is weird - let's get a stranger FleetState going.
        checksum = fleet_state_checksum_bytes.hex()

        # TODO: This doesn't make sense - a decentralized node can still learn about a federated-only node.
        from nucypher.characters.lawful import Ursula
        if constant_or_bytes(node_payload) is FLEET_STATES_MATCH:
            current_teacher.update_snapshot(
                checksum=checksum,
                updated=maya.MayaDT(
                    int.from_bytes(fleet_state_updated_bytes,
                                   byteorder="big")),
                number_of_known_nodes=len(self.known_nodes))
            return FLEET_STATES_MATCH

        node_list = Ursula.batch_from_bytes(
            node_payload, federated_only=self.federated_only)  # TODO: 466

        current_teacher.update_snapshot(checksum=checksum,
                                        updated=maya.MayaDT(
                                            int.from_bytes(
                                                fleet_state_updated_bytes,
                                                byteorder="big")),
                                        number_of_known_nodes=len(node_list))

        new_nodes = []
        for node in node_list:
            if GLOBAL_DOMAIN not in self.learning_domains:
                if not self.learning_domains.intersection(
                        node.serving_domains):
                    continue  # This node is not serving any of our domains.

            # First, determine if this is an outdated representation of an already known node.
            with suppress(KeyError):
                already_known_node = self.known_nodes[
                    node.checksum_public_address]
                if not node.timestamp > already_known_node.timestamp:
                    self.log.debug("Skipping already known node {}".format(
                        already_known_node))
                    # This node is already known.  We can safely continue to the next.
                    continue

            certificate_filepath = self.node_storage.store_node_certificate(
                certificate=node.certificate)

            try:
                if eager:
                    node.verify_node(
                        self.network_middleware,
                        accept_federated_only=self.federated_only,  # TODO: 466
                        certificate_filepath=certificate_filepath)
                    self.log.debug("Verified node: {}".format(
                        node.checksum_public_address))

                else:
                    node.validate_metadata(
                        accept_federated_only=self.federated_only)  # TODO: 466
            # This block is a mess of eagerness.  This can all be done better lazily.
            except NodeSeemsToBeDown as e:
                self.log.info(
                    f"Can't connect to {node} to verify it right now.")
            except node.InvalidNode:
                # TODO: Account for possibility that stamp, rather than interface, was bad.
                self.log.warn(node.invalid_metadata_message.format(node))
            except node.SuspiciousActivity:
                message = "Suspicious Activity: Discovered node with bad signature: {}.  " \
                          "Propagated by: {}".format(current_teacher.checksum_public_address, teacher_uri)
                self.log.warn(message)
            else:
                new = self.remember_node(node, record_fleet_state=False)
                if new:
                    new_nodes.append(node)

        self._adjust_learning(new_nodes)

        learning_round_log_message = "Learning round {}.  Teacher: {} knew about {} nodes, {} were new."
        self.log.info(
            learning_round_log_message.format(self._learning_round,
                                              current_teacher, len(node_list),
                                              len(new_nodes)), )
        if new_nodes:
            self.known_nodes.record_fleet_state()
            for node in new_nodes:
                self.node_storage.store_node_certificate(
                    certificate=node.certificate)
        return new_nodes
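
A hypothetical bootstrap for a Learner-derived character, using the entry points defined above:

learner.start_learning_loop(now=False)          # seeds and learns off the main thread
learner.block_until_number_of_known_nodes_is(
    3, timeout=30, learn_on_this_thread=True)   # block until a few peers are known

Example 11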
class RuntimeCalculator:
    def __init__(self, lock, addr='localhost', port='6800'):
        config = Config()
        self.lock = lock
        self.highest_level = glv.get_value(key='top_level')
        self.user_name = config.get('auth_username', '')
        self.user_password = config.get('auth_password', '')
        self.clear_at_start = config.get('clear_up_database_when_start', 'yes')
        self.observation_times = int(config.get('observation_times', '20'))
        self.strict_mode = config.get('strict_mode', 'no')
        self.strict_degree = int(config.get('strict_degree', '4'))  # strictness of strict mode: greater than zero, smaller is stricter
        self.db = glv.get_value(key='mysql_db')
        self.mysql_host = config.get('mysql_host', '127.0.0.1')
        self.mysql_port = config.get('mysql_port', '3306')
        self.mysql_user = config.get('mysql_user', 'root')
        self.mysql_db = config.get('mysql_db', 'scrapydartdb')
        self.runtime_log = Logger(namespace='- Runtime Collector -')
        self.terminator_log = Logger(namespace='- TERMINATOR -')
        self.sep_time = 1 * 60  # collection interval: one minute
        self.terminator_scan_sep = 20
        self.server_port = 'http://{}:{}/'.format(addr, port)
        self.jobs_url = self.server_port + 'listjobs.json'

    def list_the_spiders(self, spider_list):
        dic = dict()
        if spider_list:
            for spider_dic in spider_list:
                spider_name = [x for x in spider_dic.keys()][0]
                runtime = int([x for x in spider_dic.values()][0])
                if dic.get(spider_name):
                    dic[spider_name].append(runtime)
                else:
                    dic[spider_name] = list()
                    dic[spider_name].append(runtime)
        return dic

    def unusual_spider(self,
                       project,
                       name_of_spider,
                       runtime_of_spider,
                       save_to_database=True):
        where_str = ' where project="{}"'.format(project)
        self.lock.acquire()
        res_from_db = self.db.get_result(model=SpiderScheduleModel,
                                         fields=['project', 'spider'],
                                         where_dic={'status': '=*3'})
        data = self.db.get_spider_runtime(field=['spider', 'runtime'],
                                          where=where_str)
        self.lock.release()
        top_set = {'{}-{}'.format(x.project, x.spider) for x in res_from_db}
        item = '{}-{}'.format(project, name_of_spider)
        if item in top_set:
            return -1100
        spider_list_temp = [{
            x.spider: x.runtime
        } for x in data] if data else []
        spider_dic_temp = self.list_the_spiders(spider_list_temp)
        over_time = -1000
        if spider_dic_temp:
            time_list = spider_dic_temp.get(name_of_spider)
            if time_list and len(time_list) > self.observation_times:
                std = np.std(time_list, ddof=1)
                if self.strict_mode == "yes":
                    time_list_set = set(time_list)
                    expectation = sum([
                        x * (time_list.count(x) / len(time_list))
                        for x in time_list_set
                    ])  # mathematical expectation (mean)
                    over_time = runtime_of_spider - (
                        std * self.strict_degree + expectation
                    )  # 严格模式,样本偏差加上数学期望
                else:
                    over_time = runtime_of_spider - (std + max(time_list)
                                                     )  # 非严格模式,样本偏差加上最大值,确保不误杀
                if over_time > 0:
                    if save_to_database:
                        self.lock.acquire()
                        unusual_spider_data = self.db.get_unormal_spider(
                            field=['spider'])
                        self.lock.release()
                        unusual_spiders_set = set(
                            x.spider for x in unusual_spider_data
                        ) if unusual_spider_data else set()  # set(), not {} (an empty dict)
                        if name_of_spider not in unusual_spiders_set:
                            self.lock.acquire()
                            self.db.insert_data(model=UnormalSpider,
                                                field_names='spider',
                                                values=name_of_spider)
                            self.lock.release()
        return over_time
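
# --- Hedged sketch (editor's addition, not part of the class above): the
# anomaly threshold from unusual_spider() in isolation. In strict mode a run
# is flagged once it exceeds degree * std + weighted mean; otherwise once it
# exceeds std + max(history). `history` and `degree` are illustrative values.
import numpy as np

def over_time_sketch(runtime, history, strict=True, degree=4):
    std = np.std(history, ddof=1)  # sample standard deviation, as above
    if strict:
        expectation = sum(x * (history.count(x) / len(history)) for x in set(history))
        return runtime - (std * degree + expectation)
    return runtime - (std + max(history))

# e.g. over_time_sketch(600, [100, 110, 95, 105, 100] * 5) > 0 flags the run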

    def save_spider_runtime(self):
        if self.clear_at_start == 'yes':
            self.lock.acquire()
            self.db.del_data(model=SpiderMonitor)
            self.db.del_data(model=UnormalSpider)
            self.db.del_data(model=TerminatedSpider)
            self.lock.release()
            self.runtime_log.warn(
                'spider running record database has been cleaned up')
            self.runtime_log.info('database type: mysql')
            mysql_info = '{}@{}:{}/{}'.format(self.mysql_user, self.mysql_host,
                                              self.mysql_port, self.mysql_db)
            self.runtime_log.info('database info: {}'.format(mysql_info))
            self.runtime_log.info('observations required per spider: {}'.format(
                self.observation_times))
            self.runtime_log.info(
                'unusual spider runtime calculation in strict mode: {}'.format(
                    self.strict_mode))
            if self.strict_mode == 'yes':
                self.runtime_log.info('strict mode value: {}'.format(
                    self.strict_degree))
        time.sleep(3)
        while True:
            self.lock.acquire()
            job_res = self.db.get_result(model=SpiderMonitor,
                                         fields=['job_id'],
                                         return_model_map=True)
            self.lock.release()
            job_ids = {x.job_id for x in job_res} if job_res else set()
            save_sta = False
            for s_lis in self.runtime_monitor():
                if s_lis:
                    project, spider_name, runtime, job_id = s_lis
                    if self.unusual_spider(
                            project, spider_name, runtime,
                            save_to_database=False) <= 0:
                        self.database_limit_ctrl(
                            model=SpiderMonitor,
                            where_dic={
                                'spider': '=*{}'.format(spider_name),
                                'project': '=*{}'.format(project)
                            },
                            limit=1000)
                        if job_id not in job_ids:
                            self.lock.acquire()
                            self.db.insert_data(
                                model=SpiderMonitor,
                                field_names=[
                                    'project', 'spider', 'runtime', 'job_id'
                                ],
                                values=[project, spider_name, runtime, job_id])
                            self.lock.release()
                            save_sta = True
            if save_sta:
                self.runtime_log.info('spider runtime saved')
            time.sleep(self.sep_time)

    def database_limit_ctrl(self, model, where_dic, limit=1000):
        self.lock.acquire(blocking=True)
        res = self.db.get_result(model=model,
                                 fields=['id', 'create_time'],
                                 where_dic=where_dic,
                                 return_model_map=True)
        self.lock.release()
        if res:
            id_res = [x.id for x in res]
            id_res.sort(reverse=True)
            if len(id_res) > limit:
                if limit > 100:
                    # randomize the cutoff so pruning does not always cut at the same row
                    limit = random.randint(100, limit)
                remove_ids = id_res[limit:]
                if remove_ids:
                    self.lock.acquire()
                    self.db.del_data(model=model, where_dic={'id': remove_ids})
                    self.lock.release()
            time_res = {
                str(TP(y.create_time.strftime("%Y-%m-%d %H:%M:%S"))): y.id
                for y in res
            }
            over_time = 30 * 24 * 60 * 60  # age limit: 30 days, in seconds
            rm_lis = [time_res.get(t) for t in time_res if int(t) > over_time]
            if rm_lis:
                self.lock.acquire()
                self.db.del_data(model=model, where_dic={'id': rm_lis})
                self.lock.release()
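
# --- Hedged sketch (editor's addition): the two pruning rules used by
# database_limit_ctrl() on plain data. `rows` is a hypothetical list of
# (id, age_in_seconds) tuples; the original reads the same information
# through its SpiderMonitor-style models.
def ids_to_prune(rows, limit=1000, max_age=30 * 24 * 60 * 60):
    ids = sorted((r[0] for r in rows), reverse=True)
    over_cap = ids[limit:]  # only the newest `limit` ids survive the row cap
    too_old = [i for i, age in rows if age > max_age]  # 30-day age-out
    return set(over_cap) | set(too_old)

# e.g. ids_to_prune([(1, 10), (2, 40 * 24 * 60 * 60)], limit=1) == {1, 2}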

    def runtime_monitor(self, req_spider=''):
        res = requests.get(url=self.jobs_url)
        spider_list = list()
        spiders_dic = dict()
        if res:
            rank_list = json.loads(res.content).get('finished')
            if rank_list:
                for each_spider in rank_list:
                    project = each_spider.get('project')
                    spider_name = each_spider.get('spider')
                    job_id = each_spider.get('id')
                    s_time = each_spider.get('start_time').split('.')[0]
                    e_time = each_spider.get('end_time').split('.')[0]
                    runtime = TPT(s_time, e_time)
                    spider_list.append([project, spider_name, runtime, job_id])
                    spiders_dic.setdefault(spider_name, []).append(runtime)
        if req_spider and spiders_dic:
            return sum(spiders_dic.get(req_spider)) // len(
                spiders_dic.get(req_spider))
        return spider_list
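
# --- Hedged sketch (editor's addition): parsing a scrapyd-style listjobs.json
# payload the way runtime_monitor() does. The payload shape follows the code
# above; it is a sample, not captured output.
import json

sample = json.loads('{"finished": [{"project": "p1", "spider": "s1",'
                    ' "id": "job1", "start_time": "2020-01-01 00:00:00.000",'
                    ' "end_time": "2020-01-01 00:05:00.000"}]}')
for job in sample.get('finished', []):
    start = job['start_time'].split('.')[0]  # strip fractional seconds
    end = job['end_time'].split('.')[0]
    print(job['project'], job['spider'], job['id'], start, '->', end)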

    def time_format(self, strtime):
        # convert a duration string like "1d 2:03:04" (day part optional) into seconds
        strtime = str(strtime)

        times = [x.strip() for x in strtime.split('d')]
        if len(times) > 1:
            d = int(times[0])
            hms = times[1]
        else:
            d = 0
            hms = times[0]

        h, m, s = [int(x.strip()) for x in hms.split(":") if x and x.strip()]
        seconds = d * 24 * 60 * 60 + h * 60 * 60 + m * 60 + s
        return seconds
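
# --- Hedged worked examples (editor's addition) for time_format(): durations
# may carry an optional day component separated by "d".
#   "2:03:04"    -> 2*3600 + 3*60 + 4 = 7384 seconds
#   "1d 2:03:04" -> 86400 + 7384      = 93784 seconds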

    def terminator(self):
        time.sleep(3)
        self.terminator_log.info('Terminator Started')
        while True:
            res = json.loads(requests.get(url=self.jobs_url).content)
            if res.get('status') == 'ok':
                running_spiders = res.get('running')
                if running_spiders:
                    kill_lis = list()
                    for running_spider in running_spiders:
                        project = running_spider.get('project')
                        spider = running_spider.get('spider')
                        job_id = running_spider.get('id')
                        PID = running_spider.get('pid')
                        start_time = running_spider.get('start_time')
                        time_passed = self.time_passed(start_time)
                        if self.unusual_spider(project=project,
                                               name_of_spider=spider,
                                               runtime_of_spider=time_passed,
                                               save_to_database=False) > 0:
                            self.terminator_log.info(
                                'Unusual spider detected! ')
                            if project and job_id:
                                term = threading.Thread(
                                    target=self.kill_spider,
                                    args=(project, job_id, spider, PID))
                                term.daemon = True
                                term.start()
                                kill_lis.append("{}-{}-{}".format(
                                    project, spider, job_id))
                            else:
                                p_name = '<project name> ' if not project else '[{}]'.format(
                                    project)
                                j_id = '<job id>' if not job_id else '[{}]'.format(
                                    job_id)
                                missing_data = p_name + j_id
                                self.terminator_log.warn(
                                    'Target info {} missing, unable to locate the target!'
                                    .format(missing_data))
                    ter_msg = 'Scan completed'
                    if kill_lis:
                        ter_msg += ', Terminated target: {}'.format(
                            str(kill_lis))
                    self.terminator_log.warn(ter_msg)
            time.sleep(self.terminator_scan_sep)

    def kill_spider(self, project, job_id, spider, PID):
        kill_url = self.server_port + 'cancel.json'
        self.terminator_log.warn('\n\n\tTarget Found! >>> {}  {} <<<\n'.format(
            spider, job_id))
        self.terminator_log.warn(
            'terminate the spider "{}" within 3 seconds'.format(spider))
        time.sleep(2)
        self.terminator_log.warn('sending terminate signal...')
        body = {"project": project, "job": job_id}
        try:
            target_killed = False
            for _ in range(2):
                res = json.loads(
                    requests.post(url=kill_url, data=body).content)
                self.terminator_log.warn(
                    'terminate signal has been sent [{}]'.format(_))
                kill_status = res.get('status')
                kill_prevstate = res.get('prevstate')
                if kill_status == 'ok' and kill_prevstate not in {
                        'running', 'pending'
                }:
                    target_killed = True
                    break
                time.sleep(0.5)
            if target_killed:
                self.terminator_log.warn(
                    'Target [ {} ] has been terminated {}\n'.format(
                        spider,
                        time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
                self.lock.acquire()
                self.db.insert_data(model=TerminatedSpider,
                                    field_names=['spider', 'job_id'],
                                    values=[spider, job_id])
                self.lock.release()
            else:
                raise ValueError(
                    '\t[- TERMINATOR -] >> Signal sent, but the target is still running'
                )
        except Exception as E:
            self.terminator_log.warn(
                'something went wrong while sending the terminate signal: {}'.format(
                    E))
            self.terminator_log.warn('trying to terminate it with PID...')
            try:
                os.kill(int(PID), signal.SIGKILL)
                self.lock.acquire()
                self.db.insert_data(model=TerminatedSpider,
                                    field_names=['spider', 'job_id'],
                                    values=[spider, job_id])
                self.lock.release()
                self.terminator_log.warn(
                    'Target [{}] has been terminated {}\n'.format(
                        spider,
                        time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
            except Exception:
                os.popen('taskkill.exe /F /PID ' + str(PID))
                self.lock.acquire()
                self.db.insert_data(model=TerminatedSpider,
                                    field_names=['spider', 'job_id'],
                                    values=[spider, job_id])
                self.lock.release()
                self.terminator_log.warn(
                    'Target [{}] has been terminated {}\n'.format(
                        spider,
                        time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))

    def time_passed(self, date_time):
        date_time = date_time.strip()
        if len(date_time.split(' ')) < 2:
            date_time = date_time + " 00:00:00"
        last_news_date = date_time.split(" ")[0].split('-')
        last_news_time = date_time.split(" ")[1].split(':')
        for t in last_news_time:
            last_news_date.append(t)
        ls = [
            int(x) if '.' not in x else int(float(x)) for x in last_news_date
        ]
        secs = "(datetime.datetime.now() - " \
               "datetime.datetime({},{},{},{},{},{})).total_seconds()".format(ls[0], ls[1], ls[2], ls[3], ls[4], ls[5])
        secs = round(eval(secs))
        return secs
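
# --- Hedged sketch (editor's addition): the same elapsed-seconds computation
# as time_passed(), standalone. Assumes a "YYYY-MM-DD HH:MM:SS" timestamp.
import datetime

def seconds_since(stamp):
    then = datetime.datetime.strptime(stamp, "%Y-%m-%d %H:%M:%S")
    return round((datetime.datetime.now() - then).total_seconds())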
Esempio n. 12
0
class IRCd(Service):
	def __init__(self, configFileName):
		self.config = Config(self, configFileName)
		
		self.boundPorts = {}
		self.loadedModules = {}
		self._loadedModuleData = {}
		self._unloadingModules = {}
		self.commonModules = set()
		self.userCommands = {}
		self.serverCommands = {}
		self.channelModes = ({}, {}, {}, {})
		self.channelStatuses = {}
		self.channelStatusSymbols = {}
		self.channelStatusOrder = []
		self.channelModeTypes = {}
		self.userModes = ({}, {}, {}, {})
		self.userModeTypes = {}
		self.actions = {}
		self.storage = None
		self.storageSyncer = None
		self.dataCache = {}
		self.functionCache = {}
		
		self.serverID = None
		self.name = None
		self.isupport_tokens = {
			"CASEMAPPING": "strict-rfc1459",
			"CHANTYPES": "#",
		}
		self._uid = self._genUID()
		
		self.users = {}
		self.userNicks = CaseInsensitiveDictionary()
		self.channels = CaseInsensitiveDictionary(WeakValueDictionary)
		self.servers = {}
		self.serverNames = CaseInsensitiveDictionary()
		self.recentlyQuitUsers = {}
		self.recentlyQuitServers = {}
		self.recentlyDestroyedChannels = CaseInsensitiveDictionary()
		self.pruneRecentlyQuit = None
		self.pruneRecentChannels = None
		
		self._logFilter = LogLevelFilterPredicate()
		filterObserver = FilteringLogObserver(globalLogPublisher, (self._logFilter,))
		self.log = Logger("txircd", observer=filterObserver)
		
		self.startupTime = None
	
	def startService(self):
		self.log.info("Starting up...")
		self.startupTime = now()
		self.log.info("Loading configuration...")
		self.config.reload()
		self.name = self.config["server_name"]
		self.serverID = self.config["server_id"]
		self.log.info("Loading storage...")
		self.storage = shelve.open(self.config["datastore_path"], writeback=True)
		self.storageSyncer = LoopingCall(self.storage.sync)
		self.storageSyncer.start(self.config.get("storage_sync_interval", 5), now=False)
		self.log.info("Starting processes...")
		self.pruneRecentlyQuit = LoopingCall(self.pruneQuit)
		self.pruneRecentlyQuit.start(10, now=False)
		self.pruneRecentChannels = LoopingCall(self.pruneChannels)
		self.pruneRecentChannels.start(15, now=False)
		self.log.info("Loading modules...")
		self._loadModules()
		self.log.info("Binding ports...")
		self._bindPorts()
		self.log.info("txircd started!")
		try:
			self._logFilter.setLogLevelForNamespace("txircd", LogLevel.levelWithName(self.config["log_level"]))
		except (KeyError, InvalidLogLevelError):
			self._logFilter.setLogLevelForNamespace("txircd", LogLevel.warn)
		self.runActionStandard("startup")
	
	def stopService(self):
		stopDeferreds = []
		self.log.info("Disconnecting servers...")
		serverList = self.servers.values() # Take the list of server objects
		self.servers = {} # And then destroy the server dict to inhibit server objects generating lots of noise
		for server in serverList:
			if server.nextClosest == self.serverID:
				stopDeferreds.append(server.disconnectedDeferred)
				allUsers = self.users.keys()
				for user in allUsers:
					if user[:3] == server.serverID:
						del self.users[user]
				server.transport.loseConnection()
		self.log.info("Disconnecting users...")
		userList = self.users.values() # Basically do the same thing I just did with the servers
		self.users = {}
		for user in userList:
			if user.transport:
				stopDeferreds.append(user.disconnectedDeferred)
				user.transport.loseConnection()
		self.log.info("Unloading modules...")
		moduleList = self.loadedModules.keys()
		for module in moduleList:
			self._unloadModule(module, False) # Incomplete unload is done to save time and because side effects are destroyed anyway
		self.log.info("Stopping processes...")
		if self.pruneRecentlyQuit.running:
			self.pruneRecentlyQuit.stop()
		if self.pruneRecentChannels.running:
			self.pruneRecentChannels.stop()
		self.log.info("Closing data storage...")
		if self.storageSyncer.running:
			self.storageSyncer.stop()
		self.storage.close() # a close() will sync() also
		self.log.info("Releasing ports...")
		stopDeferreds.extend(self._unbindPorts())
		return DeferredList(stopDeferreds)
	
	def _loadModules(self):
		for module in getPlugins(IModuleData, txircd.modules):
			if module.name in self.loadedModules:
				continue
			if module.core or module.name in self.config["modules"]:
				self._loadModuleData(module)
		for moduleName in self.config["modules"]:
			if moduleName not in self.loadedModules:
				self.log.warn("The module {module} failed to load.", module=moduleName)
	
	def loadModule(self, moduleName):
		"""
		Loads a module of the specified name.
		Raises ModuleLoadError if the module cannot be loaded.
		If the specified module is currently being unloaded, returns the
		DeferredList specified by the module when it was unloading with a
		callback to try to load the module again once it succeeds.
		"""
		if moduleName in self._unloadingModules:
			deferList = self._unloadingModules[moduleName]
			deferList.addCallback(self._tryLoadAgain, moduleName)
			return deferList
		for module in getPlugins(IModuleData, txircd.modules):
			if module.name == moduleName:
				rebuild(importlib.import_module(module.__module__)) # getPlugins doesn't recompile modules, so let's do that ourselves.
				self._loadModuleData(module)
				self.log.info("Loaded module {module}.", module=moduleName)
				break
	
	def _tryLoadAgain(self, _, moduleName):
		self.loadModule(moduleName)
	
	def _loadModuleData(self, module):
		if not IModuleData.providedBy(module):
			raise ModuleLoadError ("???", "Module does not implement module interface")
		if not module.name:
			raise ModuleLoadError ("???", "Module did not provide a name")
		if module.name in self.loadedModules:
			self.log.debug("Not loading {module.name} because it's already loaded", module=module)
			return
		
		self.log.debug("Beginning to load {module.name}...", module=module)
		module.hookIRCd(self)
		try:
			module.verifyConfig(self.config)
		except ConfigError as e:
			raise ModuleLoadError(module.name, e)
		
		self.log.debug("Loading hooks from {module.name}...", module=module)
		moduleData = {
			"channelmodes": module.channelModes(),
			"usermodes": module.userModes(),
			"actions": module.actions(),
			"usercommands": module.userCommands(),
			"servercommands": module.serverCommands()
		}
		newChannelModes = ({}, {}, {}, {})
		newChannelStatuses = {}
		newUserModes = ({}, {}, {}, {})
		newActions = {}
		newUserCommands = {}
		newServerCommands = {}
		common = False
		self.log.debug("Processing hook data from {module.name}...", module=module)
		for mode in moduleData["channelmodes"]:
			if mode[0] in self.channelModeTypes:
				raise ModuleLoadError (module.name, "Tries to implement channel mode +{} when that mode is already implemented.".format(mode[0]))
			if not IMode.providedBy(mode[2]):
				raise ModuleLoadError (module.name, "Returns a channel mode object (+{}) that doesn't implement IMode.".format(mode[0]))
			if mode[1] == ModeType.Status:
				if mode[4] in self.channelStatusSymbols:
					raise ModuleLoadError (module.name, "Tries to create a channel rank with symbol {} when that symbol is already in use.".format(mode[4]))
				try:
					newChannelStatuses[mode[0]] = (mode[4], mode[3], mode[2])
				except IndexError:
					raise ModuleLoadError (module.name, "Specifies channel status mode {} without a rank or symbol".format(mode[0]))
			else:
				newChannelModes[mode[1]][mode[0]] = mode[2]
			common = True
		for mode in moduleData["usermodes"]:
			if mode[0] in self.userModeTypes:
				raise ModuleLoadError (module.name, "Tries to implement user mode +{} when that mode is already implemented.".format(mode[0]))
			if not IMode.providedBy(mode[2]):
				raise ModuleLoadError (module.name, "Returns a user mode object (+{}) that doesn't implement IMode.".format(mode[0]))
			newUserModes[mode[1]][mode[0]] = mode[2]
			common = True
		for action in moduleData["actions"]:
			if action[0] not in newActions:
				newActions[action[0]] = [(action[2], action[1])]
			else:
				newActions[action[0]].append((action[2], action[1]))
		for command in moduleData["usercommands"]:
			if not ICommand.providedBy(command[2]):
				raise ModuleLoadError (module.name, "Returns a user command object ({}) that doesn't implement ICommand.".format(command[0]))
			if command[0] not in newUserCommands:
				newUserCommands[command[0]] = []
			newUserCommands[command[0]].append((command[2], command[1]))
		for command in moduleData["servercommands"]:
			if not ICommand.providedBy(command[2]):
				raise ModuleLoadError (module.name, "Returns a server command object ({}) that doesnt implement ICommand.".format(command[0]))
			if command[0] not in newServerCommands:
				newServerCommands[command[0]] = []
			newServerCommands[command[0]].append((command[2], command[1]))
			common = True
		if not common:
			common = module.requiredOnAllServers

		self.log.debug("Loaded data from {module.name}; committing data and calling hooks...", module=module)
		
		module.load()
		
		self.loadedModules[module.name] = module
		self._loadedModuleData[module.name] = moduleData
		if common:
			self.commonModules.add(module.name)
		
		self.runActionStandard("moduleload", module.name)
		
		for modeType, typeSet in enumerate(newChannelModes):
			for mode, implementation in typeSet.iteritems():
				self.channelModeTypes[mode] = modeType
				self.channelModes[modeType][mode] = implementation
		for mode, data in newChannelStatuses.iteritems():
			self.channelModeTypes[mode] = ModeType.Status
			self.channelStatuses[mode] = data
			self.channelStatusSymbols[data[0]] = mode
			for index, status in enumerate(self.channelStatusOrder):
				if self.channelStatuses[status][1] < data[1]:
					self.channelStatusOrder.insert(index, mode)
					break
			else:
				self.channelStatusOrder.append(mode)
		for modeType, typeSet in enumerate(newUserModes):
			for mode, implementation in typeSet.iteritems():
				self.userModeTypes[mode] = modeType
				self.userModes[modeType][mode] = implementation
		for action, actionList in newActions.iteritems():
			if action not in self.actions:
				self.actions[action] = []
			for actionData in actionList:
				for index, handlerData in enumerate(self.actions[action]):
					if handlerData[1] < actionData[1]:
						self.actions[action].insert(index, actionData)
						break
				else:
					self.actions[action].append(actionData)
		for command, dataList in newUserCommands.iteritems():
			if command not in self.userCommands:
				self.userCommands[command] = []
			for data in dataList:
				for index, cmd in enumerate(self.userCommands[command]):
					if cmd[1] < data[1]:
						self.userCommands[command].insert(index, data)
						break
				else:
					self.userCommands[command].append(data)
		for command, dataList in newServerCommands.iteritems():
			if command not in self.serverCommands:
				self.serverCommands[command] = []
			for data in dataList:
				for index, cmd in enumerate(self.serverCommands[command]):
					if cmd[1] < data[1]:
						self.serverCommands[command].insert(index, data)
						break
				else:
					self.serverCommands[command].append(data)
		
		self.log.debug("Module {module.name} is now fully loaded.", module=module)
	
	def unloadModule(self, moduleName):
		"""
		Unloads the loaded module with the given name. Raises ValueError
		if the module cannot be unloaded because it's a core module.
		"""
		self._unloadModule(moduleName, True)
		self.log.info("Unloaded module {module}.", module=moduleName)
	
	def _unloadModule(self, moduleName, fullUnload):
		unloadDeferreds = []
		if moduleName not in self.loadedModules:
			return
		module = self.loadedModules[moduleName]
		if fullUnload and module.core:
			raise ValueError ("The module you're trying to unload is a core module.")
		moduleData = self._loadedModuleData[moduleName]
		d = module.unload()
		if d is not None:
			unloadDeferreds.append(d)
		
		if fullUnload:
			d = module.fullUnload()
			if d is not None:
				unloadDeferreds.append(d)
		
		for modeData in moduleData["channelmodes"]:
			if fullUnload: # Unset modes on full unload
				if modeData[1] == ModeType.Status:
					for channel in self.channels.itervalues():
						removeFromChannel = []
						for user, userData in channel.user.iteritems():
							if modeData[0] in userData["status"]:
								removeFromChannel.append((False, modeData[0], user.uuid))
						channel.setModes(removeFromChannel, self.serverID)
				elif modeData[1] == ModeType.List:
					for channel in self.channels.itervalues():
						if modeData[0] in channel.modes:
							removeFromChannel = []
							for paramData in channel.modes[modeData[0]]:
								removeFromChannel.append((False, modeData[0], paramData[0]))
							channel.setModes(removeFromChannel, self.serverID)
				else:
					for channel in self.channels.itervalues():
						if modeData[0] in channel.modes:
							channel.setModes([(False, modeData[0], channel.modes[modeData[0]])], self.serverID)
			
			if modeData[1] == ModeType.Status:
				del self.channelStatuses[modeData[0]]
				del self.channelStatusSymbols[modeData[4]]
				self.channelStatusOrder.remove(modeData[0])
			else:
				del self.channelModes[modeData[1]][modeData[0]]
			del self.channelModeTypes[modeData[0]]
		for modeData in moduleData["usermodes"]:
			if fullUnload: # Unset modes on full unload
				if modeData[1] == ModeType.List:
					for user in self.users.itervalues():
						if modeData[0] in user.modes:
							removeFromUser = []
							for paramData in user.modes[modeData[0]]:
								removeFromUser.append((False, modeData[0], paramData[0]))
							user.setModes(removeFromUser, self.serverID)
				else:
					for user in self.users.itervalues():
						if modeData[0] in user.modes:
							user.setModes([(False, modeData[0], user.modes[modeData[0]])], self.serverID)
			
			del self.userModes[modeData[1]][modeData[0]]
			del self.userModeTypes[modeData[0]]
		for actionData in moduleData["actions"]:
			self.actions[actionData[0]].remove((actionData[2], actionData[1]))
			if not self.actions[actionData[0]]:
				del self.actions[actionData[0]]
		for commandData in moduleData["usercommands"]:
			self.userCommands[commandData[0]].remove((commandData[2], commandData[1]))
			if not self.userCommands[commandData[0]]:
				del self.userCommands[commandData[0]]
		for commandData in moduleData["servercommands"]:
			self.serverCommands[commandData[0]].remove((commandData[2], commandData[1]))
			if not self.serverCommands[commandData[0]]:
				del self.serverCommands[commandData[0]]
		
		del self.loadedModules[moduleName]
		del self._loadedModuleData[moduleName]
		
		if fullUnload:
			self.runActionStandard("moduleunload", module.name)
		
		if unloadDeferreds:
			deferList = DeferredList(unloadDeferreds)
			self._unloadingModules[moduleName] = deferList
			deferList.addCallback(self._removeFromUnloadingList, moduleName)
			return deferList
	
	def _removeFromUnloadingList(self, _, moduleName):
		del self._unloadingModules[moduleName]
	
	def reloadModule(self, moduleName):
		"""
		Reloads the module with the given name.
		Returns a DeferredList if the module unloads with one or more Deferreds.
		May raise ModuleLoadError if the module cannot be loaded.
		"""
		deferList = self._unloadModule(moduleName, False)
		if deferList is None:
			deferList = self.loadModule(moduleName)
		else:
			deferList.addCallback(lambda result: self.loadModule(moduleName))
		return deferList
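
# --- Hedged sketch (editor's addition): how a DeferredList gates follow-up
# work, as the unload/reload chain above does. Names here are illustrative.
from twisted.internet.defer import Deferred, DeferredList

unload1, unload2 = Deferred(), Deferred()
pending = DeferredList([unload1, unload2])
# the reload callback only runs after every unload Deferred has fired
pending.addCallback(lambda results: print("all unloads finished:", results))
unload1.callback(None)
unload2.callback(None)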

	def verifyConfig(self, config):
		# IRCd
		if "server_name" not in config:
			raise ConfigValidationError("server_name", "required item not found in configuration file.")
		if not isinstance(config["server_name"], basestring):
			raise ConfigValidationError("server_name", "value must be a string")
		if len(config["server_name"]) > 64:
			config["server_name"] = config["server_name"][:64]
			self.logConfigValidationWarning("server_name", "value is too long and has been truncated", config["server_name"])
		if not re.match(r"^[a-zA-Z0-9.-]+\.[a-zA-Z0-9.-]+$", config["server_name"]):
			raise ConfigValidationError("server_name", "server name must look like a valid hostname.")
		if "server_id" in config:
			if not isinstance(config["server_id"], basestring):
				raise ConfigValidationError("server_id", "value must be a string")
			else:
				config["server_id"] = config["server_id"].upper()
		else:
			randFromName = random.Random(config["server_name"])
			serverID = randFromName.choice(string.digits) + randFromName.choice(string.digits + string.ascii_uppercase) + randFromName.choice(string.digits + string.ascii_uppercase)
			config["server_id"] = serverID
		if len(config["server_id"]) != 3 or not config["server_id"].isalnum() or not config["server_id"][0].isdigit():
			raise ConfigValidationError("server_id", "value must be a 3-character alphanumeric string starting with a number.")
		if "server_description" not in config:
			raise ConfigValidationError("server_description", "required item not found in configuration file.")
		if not isinstance(config["server_description"], basestring):
			raise ConfigValidationError("server_description", "value must be a string")
		if not config["server_description"]:
			raise ConfigValidationError("server_description", "value must not be an empty string")
		if len(config["server_description"]) > 255:
			config["server_description"] = config["server_description"][:255]
			self.logConfigValidationWarning("server_description", "value is too long and has been truncated", config["server_description"])
		if "network_name" not in config:
			raise ConfigValidationError("network_name", "required item not found in configuration file.")
		if not isinstance(config["network_name"], basestring):
			raise ConfigValidationError("network_name", "value must be a string")
		if not config["network_name"]:
			raise ConfigValidationError("network_name", "value must not be an empty string")
		if " " in config["network_name"]:
			raise ConfigValidationError("network_name", "value cannot have spaces")
		if len(config["network_name"]) > 32:
			config["network_name"] = config["network_name"][:32]
			self.logConfigValidationWarning("network_name", "value is too long", config["network_name"])
		if "bind_client" not in config:
			config["bind_client"] = [ "tcp:6667:interface={::}" ]
			self.logConfigValidationWarning("bind_client", "no default client binding specified", "[ \"tcp:6667:interface={::}\" ]")
		if not isinstance(config["bind_client"], list):
			raise ConfigValidationError("bind_client", "value must be a list")
		for bindDesc in config["bind_client"]:
			if not isinstance(bindDesc, basestring):
				raise ConfigValidationError("bind_client", "every entry must be a string")
		if "bind_server" not in config:
			config["bind_server"] = []
		if not isinstance(config["bind_server"], list):
			raise ConfigValidationError("bind_server", "value must be a list")
		for bindDesc in config["bind_server"]:
			if not isinstance(bindDesc, basestring):
				raise ConfigValidationError("bind_server", "every entry must be a string")
		if "modules" not in config:
			config["modules"] = []
		if not isinstance(config["modules"], list):
			raise ConfigValidationError("modules", "value must be a list")
		for module in config["modules"]:
			if not isinstance(module, basestring):
				raise ConfigValidationError("modules", "every entry must be a string")
		if "links" in config:
			if not isinstance(config["links"], dict):
				raise ConfigValidationError("links", "value must be a dictionary")
			for desc, server in config["links"].iteritems():
				if not isinstance(desc, basestring):
					raise ConfigValidationError("links", "\"{}\" is an invalid server description".format(desc))
				if not isinstance(server, dict):
					raise ConfigValidationError("links", "values for \"{}\" must be a dictionary".format(desc))
				if "connect_descriptor" not in server:
					raise ConfigValidationError("links", "server \"{}\" must contain a \"connect_descriptor\" value".format(desc))
				if "in_password" in server:
					if not isinstance(server["in_password"], basestring):
						config["links"][desc]["in_password"] = str(server["in_password"])
				if "out_password" in server:
					if not isinstance(server["out_password"], basestring):
						config["links"][desc]["out_password"] = str(server["out_password"])
		if "datastore_path" not in config:
			config["datastore_path"] = "data.db"
		if "storage_sync_interval" in config and not isinstance(config["storage_sync_interval"], int):
			raise ConfigValidationError(config["storage_sync_interval"], "invalid number")

		# Channels
		if "channel_name_length" in config:
			if not isinstance(config["channel_name_length"], int) or config["channel_name_length"] < 0:
				raise ConfigValidationError("channel_name_length", "invalid number")
			elif config["channel_name_length"] > 64:
				config["channel_name_length"] = 64
				self.logConfigValidationWarning("channel_name_length", "value is too large", 64)
		if "modes_per_line" in config:
			if not isinstance(config["modes_per_line"], int) or config["modes_per_line"] < 0:
				raise ConfigValidationError("modes_per_line", "invalid number")
			elif config["modes_per_line"] > 20:
				config["modes_per_line"] = 20
				self.logConfigValidationWarning("modes_per_line", "value is too large", 20)
		if "channel_listmode_limit" in config:
			if not isinstance(config["channel_listmode_limit"], int) or config["channel_listmode_limit"] < 0:
				raise ConfigValidationError("channel_listmode_limit", "invalid number")
			if config["channel_listmode_limit"] > 256:
				config["channel_listmode_limit"] = 256
				self.logConfigValidationWarning("channel_listmode_limit", "value is too large", 256)

		# Users
		if "user_registration_timeout" in config:
			if not isinstance(config["user_registration_timeout"], int) or config["user_registration_timeout"] < 0:
				raise ConfigValidationError("user_registration_timeout", "invalid number")
			elif config["user_registration_timeout"] < 10:
				config["user_registration_timeout"] = 10
				self.logConfigValidationWarning("user_registration_timeout", "timeout could be too short for clients to register in time", 10)
		if "user_ping_frequency" in config and (not isinstance(config["user_ping_frequency"], int) or config["user_ping_frequency"] < 0):
			raise ConfigValidationError("user_ping_frequency", "invalid number")
		if "hostname_length" in config:
			if not isinstance(config["hostname_length"], int) or config["hostname_length"] < 0:
				raise ConfigValidationError("hostname_length", "invalid number")
			elif config["hostname_length"] > 64:
				config["hostname_length"] = 64
				self.logConfigValidationWarning("hostname_length", "value is too large", 64)
			elif config["hostname_length"] < 4:
				config["hostname_length"] = 4
				self.logConfigValidationWarning("hostname_length", "value is too small", 4)
		if "ident_length" in config:
			if not isinstance(config["ident_length"], int) or config["ident_length"] < 0:
				raise ConfigValidationError("ident_length", "invalid number")
			elif config["ident_length"] > 12:
				config["ident_length"] = 12
				self.logConfigValidationWarning("ident_length", "value is too large", 12)
			elif config["ident_length"] < 1:
				config["ident_length"] = 1
				self.logConfigValidationWarning("ident_length", "value is too small", 1)
		if "gecos_length" in config:
			if not isinstance(config["gecos_length"], int) or config["gecos_length"] < 0:
				raise ConfigValidationError("gecos_length", "invalid number")
			elif config["gecos_length"] > 128:
				config["gecos_length"] = 128
				self.logConfigValidationWarning("gecos_length", "value is too large", 128)
			elif config["gecos_length"] < 1:
				config["gecos_length"] = 1
				self.logConfigValidationWarning("gecos_length", "value is too small", 1)
		if "user_listmode_limit" in config:
			if not isinstance(config["user_listmode_limit"], int) or config["user_listmode_limit"] < 0:
				raise ConfigValidationError("user_listmode_limit", "invalid number")
			if config["user_listmode_limit"] > 256:
				config["user_listmode_limit"] = 256
				self.logConfigValidationWarning("user_listmode_limit", "value is too large", 256)

		# Servers
		if "server_registration_timeout" in config:
			if not isinstance(config["server_registration_timeout"], int) or config["server_registration_timeout"] < 0:
				raise ConfigValidationError("server_registration_timeout", "invalid number")
			elif config["server_registration_timeout"] < 10:
				config["server_registration_timeout"] = 10
				self.logConfigValidationWarning("server_registration_timeout", "timeout could be too short for servers to register in time", 10)
		if "server_ping_frequency" in config and (not isinstance(config["server_ping_frequency"], int) or config["server_ping_frequency"] < 0):
			raise ConfigValidationError("server_ping_frequency", "invalid number")

		for module in self.loadedModules.itervalues():
			module.verifyConfig(config)

	def logConfigValidationWarning(self, key, message, default):
		self.log.warn("Config value \"{configKey}\" is invalid ({message}); the value has been set to a default of \"{default}\".", configKey=key, message=message, default=default)

	def rehash(self):
		"""
		Reloads the configuration file and applies changes.
		"""
		self.log.info("Rehashing...")
		self.config.reload()
		d = self._unbindPorts() # Unbind the ports that are bound
		if d: # And then bind the new ones
			DeferredList(d).addCallback(lambda result: self._bindPorts())
		else:
			self._bindPorts()
		
		try:
			self._logFilter.setLogLevelForNamespace("txircd", LogLevel.levelWithName(self.config["log_level"]))
		except (KeyError, InvalidLogLevelError):
			pass # If we can't set a new log level, we'll keep the old one
		
		for module in self.loadedModules.itervalues():
			module.rehash()
	
	def _bindPorts(self):
		for bindDesc in self.config["bind_client"]:
			try:
				endpoint = serverFromString(reactor, unescapeEndpointDescription(bindDesc))
			except ValueError as e:
				self.log.error(e)
				continue
			listenDeferred = endpoint.listen(UserFactory(self))
			listenDeferred.addCallback(self._savePort, bindDesc, "client")
			listenDeferred.addErrback(self._logNotBound, bindDesc)
		for bindDesc in self.config["bind_server"]:
			try:
				endpoint = serverFromString(reactor, unescapeEndpointDescription(bindDesc))
			except ValueError as e:
				self.log.error(e)
				continue
			listenDeferred = endpoint.listen(ServerListenFactory(self))
			listenDeferred.addCallback(self._savePort, bindDesc, "server")
			listenDeferred.addErrback(self._logNotBound, bindDesc)
	
	def _unbindPorts(self):
		deferreds = []
		for port in self.boundPorts.itervalues():
			d = port.stopListening()
			if d:
				deferreds.append(d)
		return deferreds
	
	def _savePort(self, port, desc, portType):
		self.boundPorts[desc] = port
		self.log.debug("Bound endpoint '{endpointDescription}' for {portType} connections.", endpointDescription=desc, portType=portType)
	
	def _logNotBound(self, err, desc):
		self.log.error("Could not bind '{endpointDescription}': {errorMsg}", endpointDescription=desc, errorMsg=err)
	
	def createUUID(self):
		"""
		Gets the next UUID for a new client.
		"""
		newUUID = self.serverID + self._uid.next()
		while newUUID in self.users: # It'll take over 1.5 billion connections to loop around, but we still
			newUUID = self.serverID + self._uid.next() # want to be extra safe and avoid collisions
		self.log.debug("Generated new UUID {uuid}", uuid=newUUID)
		return newUUID
	
	def _genUID(self):
		uid = "AAAAAA"
		while True:
			yield uid
			uid = self._incrementUID(uid)
	
	def _incrementUID(self, uid):
		if uid == "Z": # The first character must be a letter
			return "A" # So wrap that around
		if uid[-1] == "9":
			return self._incrementUID(uid[:-1]) + "A"
		if uid[-1] == "Z":
			return uid[:-1] + "0"
		return uid[:-1] + chr(ord(uid[-1]) + 1)
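
# --- Hedged illustration (editor's addition): the UID counter above runs each
# position through A..Z then 0..9 before carrying into the position to its
# left, and the first character wraps from Z back to A so it stays a letter:
#   AAAAAA -> AAAAAB ... AAAAAZ -> AAAAA0 ... AAAAA9 -> AAAABA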
	
	def pruneQuit(self):
		compareTime = now() - timedelta(seconds=10)
		remove = []
		for uuid, timeQuit in self.recentlyQuitUsers.iteritems():
			if timeQuit < compareTime:
				remove.append(uuid)
		for uuid in remove:
			del self.recentlyQuitUsers[uuid]
		
		remove = []
		for serverID, timeQuit in self.recentlyQuitServers.iteritems():
			if timeQuit < compareTime:
				remove.append(serverID)
		for serverID in remove:
			del self.recentlyQuitServers[serverID]
	
	def pruneChannels(self):
		removeChannels = []
		for channel, remove in self.recentlyDestroyedChannels.iteritems():
			if remove:
				removeChannels.append(channel)
			elif channel not in self.channels:
				self.recentlyDestroyedChannels[channel] = True
		for channel in removeChannels:
			del self.recentlyDestroyedChannels[channel]
	
	def generateISupportList(self):
		isupport = self.isupport_tokens.copy()
		statusSymbolOrder = "".join([self.channelStatuses[status][0] for status in self.channelStatusOrder])
		isupport["CHANMODES"] = ",".join(["".join(modes) for modes in self.channelModes])
		isupport["CHANNELLEN"] = self.config.get("channel_name_length", 64)
		isupport["NETWORK"] = self.config["network_name"]
		isupport["PREFIX"] = "({}){}".format("".join(self.channelStatusOrder), statusSymbolOrder)
		isupport["STATUSMSG"] = statusSymbolOrder
		isupport["USERMODES"] = ",".join(["".join(modes) for modes in self.userModes])
		self.runActionStandard("buildisupport", isupport)
		isupportList = []
		for key, val in isupport.iteritems():
			if val is None:
				isupportList.append(key)
			else:
				isupportList.append("{}={}".format(key, val))
		return isupportList
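
# --- Hedged illustration (editor's addition): with an op mode (o, symbol @,
# rank 100) and a voice mode (v, symbol +, rank 10) registered, the tokens
# built above would include PREFIX=(ov)@+ and STATUSMSG=@+, higher ranks first.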
	
	def connectServer(self, name):
		"""
		Connect a server with the given name in the configuration.
		Returns a Deferred for the connection when we can successfully connect
		or None if the server is already connected or if we're unable to find
		information for that server in the configuration.
		"""
		if name in self.serverNames:
			return None
		if name not in self.config.get("links", {}):
			return None
		serverConfig = self.config["links"][name]
		endpoint = clientFromString(reactor, unescapeEndpointDescription(serverConfig["connect_descriptor"]))
		d = endpoint.connect(ServerConnectFactory(self))
		d.addCallback(self._completeServerConnection, name)
		return d
	
	def _completeServerConnection(self, result, name):
		self.log.info("Connected to server {serverName}", serverName=name)
		self.runActionStandard("initiateserverconnection", result)
	
	def broadcastToServers(self, fromServer, command, *params, **kw):
		"""
		Broadcasts a message to all connected servers. The fromServer parameter
		should be the server from which the message came; if this server is the
		originating server, specify None for fromServer.
		"""
		for server in self.servers.itervalues():
			if server.nextClosest == self.serverID and server != fromServer:
				server.sendMessage(command, *params, **kw)
	
	def _getActionModes(self, actionName, *params, **kw):
		users = []
		channels = []
		if "users" in kw:
			users = kw["users"]
		if "channels" in kw:
			channels = kw["channels"]
		
		functionList = []
		
		if users:
			genericUserActionName = "modeactioncheck-user-{}".format(actionName)
			genericUserActionNameWithChannel = "modeactioncheck-user-withchannel-{}".format(actionName)
			for modeType in self.userModes:
				for mode, modeObj in modeType.iteritems():
					if actionName not in modeObj.affectedActions:
						continue
					priority = modeObj.affectedActions[actionName]
					actionList = []
					# Because Python doesn't properly capture variables in lambdas, we have to force static capture
					# by wrapping lambdas in more lambdas.
					# I wish Python wasn't this gross.
					for action in self.actions.get("modeactioncheck-user", []):
						actionList.append(((lambda action, actionName, mode: lambda user, *params: action[0](actionName, mode, user, *params))(action, actionName, mode), action[1]))
					for action in self.actions.get("modeactioncheck-user-withchannel", []):
						for channel in channels:
							actionList.append(((lambda action, actionName, mode, channel: lambda user, *params: action[0](actionName, mode, user, channel, *params))(action, actionName, mode, channel), action[1]))
					for action in self.actions.get(genericUserActionName, []):
						actionList.append(((lambda action, mode: lambda user, *params: action[0](mode, user, *params))(action, mode), action[1]))
					for action in self.actions.get(genericUserActionNameWithChannel, []):
						for channel in channels:
							actionList.append(((lambda action, mode, channel: lambda user, *params: action[0](mode, user, channel, *params))(action, mode, channel), action[1]))
					modeUserActionName = "modeactioncheck-user-{}-{}".format(mode, actionName)
					modeUserActionNameWithChannel = "modeactioncheck-user-withchannel-{}-{}".format(mode, actionName)
					for action in self.actions.get(modeUserActionNameWithChannel, []):
						for channel in channels:
							actionList.append(((lambda action, channel: lambda user, *params: action[0](user, channel, *params))(action, channel), action[1]))
					actionList = sorted(self.actions.get(modeUserActionName, []) + actionList, key=lambda action: action[1], reverse=True)
					applyUsers = []
					for user in users:
						for action in actionList:
							param = action[0](user, *params)
							if param is not None:
								if param is not False:
									applyUsers.append((user, param))
								break
					for user, param in applyUsers:
						functionList.append(((lambda modeObj, actionName, user, param: lambda *params: modeObj.apply(actionName, user, param, *params))(modeObj, actionName, user, param), priority))
		
		if channels:
			genericChannelActionName = "modeactioncheck-channel-{}".format(actionName)
			genericChannelActionNameWithUser = "******".format(actionName)
			for modeType in self.channelModes:
				for mode, modeObj in modeType.iteritems():
					if actionName not in modeObj.affectedActions:
						continue
					priority = modeObj.affectedActions[actionName]
					actionList = []
					for action in self.actions.get("modeactioncheck-channel", []):
						actionList.append(((lambda action, actionName, mode: lambda channel, *params: action[0](actionName, mode, channel, *params))(action, actionName, mode), action[1]))
					for action in self.actions.get("modeactioncheck-channel-withuser", []):
						for user in users:
							actionList.append(((lambda action, actionName, mode, user: lambda channel, *params: action[0](actionName, mode, channel, user, *params))(action, actionName, mode, user), action[1]))
					for action in self.actions.get(genericChannelActionName, []):
						actionList.append(((lambda action, mode: lambda channel, *params: action[0](mode, channel, *params))(action, mode), action[1]))
					for action in self.actions.get(genericChannelActionNameWithUser, []):
						for user in users:
							actionList.append(((lambda action, mode, user: lambda channel, *params: action[0](mode, channel, user, *params))(action, mode, user), action[1]))
					modeChannelActionName = "modeactioncheck-channel-{}-{}".format(mode, actionName)
					modeChannelActionNameWithUser = "******".format(mode, actionName)
					for action in self.actions.get(modeChannelActionNameWithUser, []):
						for user in users:
							actionList.append(((lambda action, user: lambda channel, *params: action[0](channel, user, *params))(action, user), action[1]))
					actionList = sorted(self.actions.get(modeChannelActionName, []) + actionList, key=lambda action: action[1], reverse=True)
					applyChannels = []
					for channel in channels:
						for action in actionList:
							param = action[0](channel, *params)
							if param is not None:
								if param is not False:
									applyChannels.append((channel, param))
								break
					for channel, param in applyChannels:
						functionList.append(((lambda modeObj, actionName, channel, param: lambda *params: modeObj.apply(actionName, channel, param, *params))(modeObj, actionName, channel, param), priority))
		return functionList
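
# --- Hedged illustration (editor's addition) of the closure pitfall the
# wrapper lambdas above work around: a loop variable is captured by reference,
# so every lambda sees its final value unless it is bound eagerly.
naive = [lambda: i for i in range(3)]
print([f() for f in naive])    # [2, 2, 2] - late binding
wrapped = [(lambda i: lambda: i)(i) for i in range(3)]
print([f() for f in wrapped])  # [0, 1, 2] - value captured per iteration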
	
	def _getActionFunctionList(self, actionName, *params, **kw):
		functionList = self.actions.get(actionName, [])
		functionList = functionList + self._getActionModes(actionName, *params, **kw)
		return sorted(functionList, key=lambda action: action[1], reverse=True)
	
	def _combineActionFunctionLists(self, actionLists):
		"""
		Combines multiple lists of action functions into one.
		Assumes all lists are sorted.
		Takes a dict mapping action names to their action function lists.
		Returns a list in priority order (highest to lowest) of (actionName, function) tuples.
		"""
		fullActionList = []
		for actionName, actionList in actionLists.iteritems():
			insertPos = 0
			for action in actionList:
				try:
					while fullActionList[insertPos][1] > action[1]:
						insertPos += 1
					fullActionList.insert(insertPos, (actionName, action[0]))
				except IndexError:
					fullActionList.append((actionName, action[0]))
				insertPos += 1
		return fullActionList
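
# --- Hedged sketch (editor's addition): merging two pre-sorted action lists
# by priority, as _combineActionFunctionLists() does; the "functions" are
# string stand-ins and the stable sort keeps ties in insertion order.
a = [("fa1", 50), ("fa2", 10)]  # (function, priority), sorted descending
b = [("fb1", 30)]
merged = []
for name, lst in {"actionA": a, "actionB": b}.items():
    for func, prio in lst:
        merged.append((name, func, prio))
merged.sort(key=lambda t: t[2], reverse=True)
print([(n, f) for n, f, _ in merged])
# [('actionA', 'fa1'), ('actionB', 'fb1'), ('actionA', 'fa2')]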
	
	def runActionStandard(self, actionName, *params, **kw):
		"""
		Calls all functions for a given action with the given parameters in
		priority order. Accepts the 'users' and 'channels' keyword arguments to
		determine which mode handlers should be included.
		"""
		actionList = self._getActionFunctionList(actionName, *params, **kw)
		for action in actionList:
			action[0](*params)
	
	def runActionUntilTrue(self, actionName, *params, **kw):
		"""
		Calls functions for a given action with the given parameters in
		priority order until one of them returns a true value. Returns True
		when one of the functions returned True. Accepts the 'users' and
		'channels' keyword arguments to determine which mode handlers should be
		included.
		"""
		actionList = self._getActionFunctionList(actionName, *params, **kw)
		for action in actionList:
			if action[0](*params):
				return True
		return False
	
	def runActionUntilFalse(self, actionName, *params, **kw):
		"""
		Calls functions for a given action with the given parameters in
		priority order until one of them returns a false value. Returns True
		when one of the functions returned False. Accepts the 'users' and
		'channels' keyword arguments to determine which mode handlers should be
		included.
		"""
		actionList = self._getActionFunctionList(actionName, *params, **kw)
		for action in actionList:
			if not action[0](*params):
				return True
		return False
	
	def runActionUntilValue(self, actionName, *params, **kw):
		"""
		Calls functions for a given action with the given parameters in
		priority order until one of them returns a non-None value. Returns the
		value returned by the function that returned a non-None value. Accepts
		the 'users' and 'channels' keyword arguments to determine which mode
		handlers should be included.
		"""
		actionList = self._getActionFunctionList(actionName, *params, **kw)
		for action in actionList:
			value = action[0](*params)
			if value is not None:
				return value
		return None
	
	def runActionFlagTrue(self, actionName, *params, **kw):
		"""
		Calls all functions for a given action with the given parameters in
		priority order. Returns True when one of the functions returns a true
		value. Accepts the 'users' and 'channels' keyword arguments to
		determine which mode handlers should be included.
		"""
		oneIsTrue = False
		actionList = self._getActionFunctionList(actionName, *params, **kw)
		for action in actionList:
			if action[0](*params):
				oneIsTrue = True
		return oneIsTrue
	
	def runActionFlagFalse(self, actionName, *params, **kw):
		"""
		Calls all functions for a given action with the given parameters in
		priority order. Returns True when one of the functions returns a false
		value. Accepts the 'users' and 'channels' keyword arguments to
		determine which mode handlers should be included.
		"""
		oneIsFalse = False
		actionList = self._getActionFunctionList(actionName, *params, **kw)
		for action in actionList:
			if action[0](*params):
				oneIsFalse = True
		return oneIsFalse
	
	def runActionProcessing(self, actionName, data, *params, **kw):
		"""
		Calls functions for a given action with the given parameters in
		priority order until the provided data is all processed (the data
		parameter becomes empty). Accepts 'users' and 'channels' keyword
		arguments to determine which mode handlers should be included.
		"""
		actionList = self._getActionFunctionList(actionName, data, *params, **kw)
		for action in actionList:
			action[0](data, *params)
			if not data:
				return
	
	def runActionProcessingMultiple(self, actionName, dataList, *params, **kw):
		"""
		Calls functions for a given action with the given parameters in
		priority order until the provided data is all processed (all of the
		data structures in the dataList parameter become empty). Accepts
		'users' and 'channels' keyword arguments to determine which mode
		handlers should be included.
		"""
		paramList = dataList + params
		actionList = self._getActionFunctionList(actionName, *paramList, **kw)
		for action in actionList:
			action[0](*paramList)
			for data in dataList:
				if data:
					break
			else:
				return
	
	def runComboActionStandard(self, actionList, **kw):
		"""
		Calls all functions for the given actions with the given parameters in
		priority order. Actions are specified as a list of tuples:
		[ ("action1", param1, param2, ...), ("action2", param1, param2, ...) ]
		Accepts 'users' and 'channels' keyword arguments to determine which
		mode handlers should be included.
		"""
		actionFuncLists = {}
		actionParameters = {}
		for action in actionList:
			parameters = action[1:]
			actionParameters[action[0]] = parameters
			actionFuncLists[action[0]] = self._getActionFunctionList(action[0], *parameters, **kw)
		funcList = self._combineActionFunctionLists(actionFuncLists)
		for actionName, actionFunc in funcList:
			actionFunc(*actionParameters[actionName])
	
	def runComboActionUntilTrue(self, actionList, **kw):
		"""
		Calls functions for the given actions with the given parameters in
		priority order until one of the functions returns a true value. Actions
		are specified as a list of tuples:
		[ ("action1", param1, param2, ...), ("action2", param1, param2, ...) ]
		Returns True if one of the functions returned a true value. Accepts
		'users' and 'channels' keyword arguments to determine which mode
		handlers should be included.
		"""
		actionFuncLists = {}
		actionParameters = {}
		for action in actionList:
			parameters = action[1:]
			actionParameters[action[0]] = parameters
			actionFuncLists[action[0]] = self._getActionFunctionList(action[0], *parameters, **kw)
		funcList = self._combineActionFunctionLists(actionFuncLists)
		for actionName, actionFunc in funcList:
			if actionFunc(*actionParameters[actionName]):
				return True
		return False
	
	def runComboActionUntilFalse(self, actionList, **kw):
		"""
		Calls functions for the given actions with the given parameters in
		priority order until one of the functions returns a false value.
		Actions are specified as a list of tuples:
		[ ("action1", param1, param2, ...), ("action2", param1, param2, ...) ]
		Returns True if one of the functions returned a false value. Accepts
		'users' and 'channels' keyword arguments to determine which mode
		handlers should be included.
		"""
		actionFuncLists = {}
		actionParameters = {}
		for action in actionList:
			parameters = action[1:]
			actionParameters[action[0]] = parameters
			actionFuncLists[action[0]] = self._getActionFunctionList(action[0], *parameters, **kw)
		funcList = self._combineActionFunctionLists(actionFuncLists)
		for actionName, actionFunc in funcList:
			if not actionFunc(*actionParameters[actionName]):
				return True
		return False
	
	def runComboActionUntilValue(self, actionList, **kw):
		"""
		Calls functions for the given actions with the given parameters in
		priority order until one of the functions returns a non-None value.
		Actions are specified as a list of tuples:
		[ ("action1", param1, param2, ...), ("action2", param1, param2, ...) ]
		Returns the value returned by the function that returned a non-None
		value. Accepts 'users' and 'channels' keyword arguments to determine
		which mode handlers should be included.
		"""
		actionFuncLists = {}
		actionParameters = {}
		for action in actionList:
			parameters = action[1:]
			actionParameters[action[0]] = parameters
			actionFuncLists[action[0]] = self._getActionFunctionList(action[0], *parameters, **kw)
		funcList = self._combineActionFunctionLists(actionFuncLists)
		for actionName, actionFunc in funcList:
			value = actionFunc(*actionParameters[actionName])
			if value is not None:
				return value
		return None
	
	def runComboActionFlagTrue(self, actionList, **kw):
		"""
		Calls all functions for the given actions with the given parameters in
		priority order. Actions are specified as a list of tuples:
		[ ("action1", param1, param2, ...), ("action2", param1, param2, ...) ]
		Returns True if any of the functions called returned a true value.
		Accepts 'users' and 'channels' keyword arguments to determine which
		mode handlers should be included.
		"""
		actionFuncLists = {}
		actionParameters = {}
		for action in actionList:
			parameters = action[1:]
			actionParameters[action[0]] = parameters
			actionFuncLists[action[0]] = self._getActionFunctionList(action[0], *parameters, **kw)
		funcList = self._combineActionFunctionLists(actionFuncLists)
		oneIsTrue = False
		for actionName, actionFunc in funcList:
			if actionFunc(*actionParameters[actionName]):
				oneIsTrue = True
		return oneIsTrue
	
	def runComboActionFlagFalse(self, actionList, **kw):
		"""
		Calls all functions for the given actions with the given parameters in
		priority order. Actions are specified as a list of tuples:
		[ ("action1", param1, param2, ...), ("action2", param1, param2, ...) ]
		Returns True if any of the functions called returned a false value.
		Accepts 'users' and 'channels' keyword arguments to determine which
		mode handlers should be included.
		"""
		actionFuncLists = {}
		actionParameters = {}
		for action in actionList:
			parameters = action[1:]
			actionParameters[action[0]] = parameters
			actionFuncLists[action[0]] = self._getActionFunctionList(action[0], *parameters, **kw)
		funcList = self._combineActionFunctionLists(actionFuncLists)
		oneIsFalse = False
		for actionName, actionFunc in funcList:
			if not actionFunc(*actionParameters[actionName]):
				oneIsFalse = True
		return oneIsFalse
	
	def runComboActionProcessing(self, data, actionList, **kw):
		"""
		Calls functions for the given actions with the given parameters in
		priority order until the data given has been processed (the data
		parameter becomes empty). Actions are specified as a list of tuples:
		[ ("action1", param1, param2, ...), ("action2", param1, param2, ...) ]
		Accepts 'users' and 'channels' keyword arguments to determine which
		mode handlers should be included.
		"""
		actionFuncLists = {}
		actionParameters = {}
		for action in actionList:
			parameters = [data] + list(action[1:])
			actionParameters[action[0]] = parameters
			actionFuncLists[action[0]] = self._getActionFunctionList(action[0], *parameters, **kw)
		funcList = self._combineActionFunctionLists(actionFuncLists)
		for actionName, actionFunc in funcList:
			actionFunc(*actionParameters[actionName])
			if not data:
				break
	
	def runComboActionProcessingMultiple(self, dataList, actionList, **kw):
		"""
		Calls functions for the given actions with the given parameters in
		priority order until the data given has been processed (all the data
		items in the dataList parameter become empty). Actions are specified as
		a list of tuples:
		[ ("action1", param1, param2, ...), ("action2", param1, param2, ...) ]
		Accepts 'users' and 'channels' keyword arguments to determine which
		mode handlers should be included.
		"""
		actionFuncLists = {}
		actionParameters = {}
		for action in actionList:
			parameters = list(dataList) + list(action[1:])
			actionParameters[action[0]] = parameters
			actionFuncLists[action[0]] = self._getActionFunctionList(action[0], *parameters, **kw)
		funcList = self._combineActionFunctionLists(actionFuncLists)
		for actionName, actionFunc in funcList:
			actionFunc(*actionParameters[actionName])
			for data in dataList:
				if data:
					break
			else:
				return
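
# A minimal usage sketch of the combo-action API above; "handler" stands for
# an instance of this class, and the action names and arguments are
# hypothetical placeholders:
#
#     actions = [
#         ("modechange-user", some_user, "o"),
#         ("modechange-channel", some_channel, "v"),
#     ]
#     # Run every registered function for both actions in one priority order:
#     handler.runComboActionStandard(actions, users=[some_user], channels=[some_channel])
#     # Stop at the first function that returns a non-None value:
#     result = handler.runComboActionUntilValue(actions, users=[some_user], channels=[some_channel])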
Esempio n. 13
0
class FCMv1Router(FCMRouter):
    """FCM v1 HTTP Router Implementation

    Note: FCM v1 is a newer version of the FCM HTTP API.
    """
    def __init__(self, conf, router_conf, metrics):
        """Create a new FCM router and connect to FCM"""
        self.conf = conf
        self.router_conf = router_conf
        self.metrics = metrics
        self.min_ttl = router_conf.get("ttl", 60)
        self.dryRun = router_conf.get("dryrun", False)
        self.collapseKey = router_conf.get("collapseKey", "webpush")
        self.version = router_conf["version"]
        self.log = Logger()
        self.clients = {}
        try:
            for (sid, creds) in router_conf["creds"].items():
                self.clients[sid] = FCMv1(project_id=creds["projectid"],
                                          service_cred_path=creds["auth"],
                                          logger=self.log,
                                          metrics=self.metrics)
        except Exception as e:
            self.log.error("Could not instantiate FCMv1: missing credentials,",
                           ex=e)
            raise IOError("FCMv1 Bridge not initiated in main")
        self._base_tags = ["platform:fcmv1"]
        self.log.debug("Starting FCMv1 router...")

    def amend_endpoint_response(self, response, router_data):
        # type: (JSONDict, JSONDict) -> None
        response["senderid"] = router_data["app_id"]

    def register(self, uaid, router_data, app_id, *args, **kwargs):
        # type: (str, JSONDict, str, *Any, **Any) -> None
        """Validate that the FCM Instance Token is in the ``router_data``"""
        # "token" is the FCM token generated by the client.
        if "token" not in router_data:
            raise self._error("connect info missing FCM Instance 'token'",
                              status=401,
                              uri=kwargs.get('uri'),
                              senderid=repr(app_id))
        if app_id not in self.clients:
            raise self._error("Invalid SenderID", status=410, errno=105)
        router_data["app_id"] = app_id

    def route_notification(self, notification, uaid_data):
        """Start the FCM notification routing, returns a deferred"""
        router_data = uaid_data["router_data"]
        return self._route(notification, router_data)

    def _route(self, notification, router_data):
        """Blocking FCM call to route the notification"""
        # THIS MUST MATCH THE CHANNELID GENERATED BY THE REGISTRATION SERVICE
        # Currently this value is in hex form.
        data = {"chid": notification.channel_id.hex}
        if not router_data.get("token"):
            raise self._error(
                "No registration token found. "
                "Rejecting message.",
                410,
                errno=106,
                log_exception=False)
        # Payload data is optional. The endpoint handler validates that the
        # correct encryption headers are included with the data.
        if notification.data:
            mdata = self.router_conf.get('max_data', 4096)
            if notification.data_length > mdata:
                raise self._error("This message is intended for a " +
                                  "constrained device and is limited " +
                                  "to 3070 bytes. Converted buffer too " +
                                  "long by %d bytes" %
                                  (notification.data_length - mdata),
                                  413,
                                  errno=104,
                                  log_exception=False)

            data['body'] = notification.data
            data['con'] = notification.headers['encoding']

            if 'encryption' in notification.headers:
                data['enc'] = notification.headers['encryption']
            if 'crypto_key' in notification.headers:
                data['cryptokey'] = notification.headers['crypto_key']
            elif 'encryption_key' in notification.headers:
                data['enckey'] = notification.headers['encryption_key']

        # registration_ids are the FCM instance tokens (specified during
        # registration).
        router_ttl = min(self.MAX_TTL, max(self.min_ttl, notification.ttl
                                           or 0))
        try:
            d = self.clients[router_data["app_id"]].send(
                token=router_data.get("token"),
                payload={
                    "collapse_key": self.collapseKey,
                    "data_message": data,
                    "dry_run": self.dryRun or ('dryrun' in router_data),
                    "ttl": router_ttl
                })
        except KeyError:
            raise self._error("Invalid Application ID specified",
                              404,
                              errno=106,
                              log_exception=False)
        d.addCallback(self._process_reply, notification, router_data,
                      router_ttl)
        d.addErrback(self._process_error)
        return d

    def _process_error(self, failure):
        err = failure.value
        if isinstance(err, FCMAuthenticationError):
            self.log.error("FCM Authentication Error: {}".format(err))
            raise RouterException("Server error", status_code=500, errno=901)
        if isinstance(err, TimeoutError):
            self.log.warn("FCM Timeout: %s" % err)
            self.metrics.increment("notification.bridge.error",
                                   tags=make_tags(self._base_tags,
                                                  reason="timeout"))
            raise RouterException("Server error",
                                  status_code=502,
                                  errno=903,
                                  log_exception=False)
        if isinstance(err, ConnectError):
            self.log.warn("FCM Unavailable: %s" % err)
            self.metrics.increment("notification.bridge.error",
                                   tags=make_tags(
                                       self._base_tags,
                                       reason="connection_unavailable"))
            raise RouterException("Server error",
                                  status_code=502,
                                  errno=902,
                                  log_exception=False)
        if isinstance(err, FCMNotFoundError):
            self.log.debug("FCM Recipient not found: %s" % err)
            self.metrics.increment("notification.bridge.error",
                                   tags=make_tags(self._base_tags,
                                                  reason="recpient_gone"))
            raise RouterException("FCM Recipient no longer available",
                                  status_code=404,
                                  errno=106,
                                  log_exception=False)
        if isinstance(err, RouterException):
            self.log.warn("FCM Error: {}".format(err))
            self.metrics.increment("notification.bridge.error",
                                   tags=make_tags(self._base_tags,
                                                  reason="server_error"))
        return failure

    def _error(self, err, status, **kwargs):
        """Error handler that raises the RouterException"""
        self.log.debug(err, **kwargs)
        return RouterException(err,
                               status_code=status,
                               response_body=err,
                               **kwargs)

    def _process_reply(self, reply, notification, router_data, ttl):
        """Process FCM send reply"""
        # Failures are returned as non-200 messages (404, 410, etc.)
        self.metrics.increment("notification.bridge.sent",
                               tags=self._base_tags)
        self.metrics.increment("notification.message_data",
                               notification.data_length,
                               tags=make_tags(self._base_tags,
                                              destination="Direct"))
        location = "%s/m/%s" % (self.conf.endpoint_url, notification.version)
        return RouterResponse(status_code=201,
                              response_body="",
                              headers={
                                  "TTL": ttl,
                                  "Location": location
                              },
                              logged_status=200)
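
# A configuration sketch for FCMv1Router, derived from the keys read in
# __init__ above; the SenderID, project id, and credential path are
# hypothetical placeholders:
router_conf = {
    "ttl": 60,
    "dryrun": False,
    "collapseKey": "webpush",
    "version": 1,
    "creds": {
        "example-sender-id": {
            "projectid": "example-fcm-project",
            "auth": "/path/to/service-credentials.json",
        },
    },
}
# router = FCMv1Router(conf, router_conf, metrics)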
Esempio n. 14
0
class TimeSchedule:

    def __init__(self, lock, host='127.0.0.1', port='6800'):
        config = Config()
        self.db = glv.get_value(key='sqlite_db')
        self.user_name = config.get('auth_username', '')
        self.user_password = config.get('auth_password', '')
        self.start_time = time.strftime("%Y %m %d %H %M %S", time.localtime())
        self.server_port = 'http://{}:{}/'.format(host, port)
        self.schedule_post_url = 'http://{}:{}/schedule.json'.format(host, port)
        self.listproject_url = 'http://{}:{}/listprojects.json'.format(host, port)
        self.spider_task_dic = dict()
        self.projects = None
        self.db_lock = lock
        self.ts_lock = threading.Lock()
        self._keys_set = {
            "year",
            "month",
            "day",
            "week",
            "hour",
            "minute",
            "second",
            "y",
            "m",
            "d",
            "w",
            "H",
            "M",
            "S",
        }
        self._keys_dic = {
            "y": "year",
            "m": "month",
            "d": "day",
            "w": "week",
            "H": "hour",
            "M": "minute",
            "S": "second",
        }
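        # e.g. a stored schedule {"m": "*/2", "S": "30"} is normalized via
        # _keys_dic to {"month": "*/2", "second": "30"} before being passed
        # to cal_time_sep() as keyword arguments (see task_scheduler below).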
        self._keys_set_lis = [list(x) for x in self._keys_set]
        self.CPU_THRESHOLD = 93
        self.MEMORY_THRESHOLD = 96
        self.schedule_logger = Logger(namespace='- Scheduler -')

    def run(self):
        time.sleep(3)
        self.projects = self.list_projects()
        self.schedule_logger.info('scheduler is running')
        count = 1
        while True:
            schedule_sta = self.task_scheduler()
            if not schedule_sta and count == 1:
                self.schedule_logger.info('No Scheduled Spider in Database')
                count += 1
            elif not schedule_sta and count != 1:
                count += 1
            else:
                count = 1
            time.sleep(1)

    def task_scheduler(self):
        self.ts_lock.acquire(blocking=True)
        self.db_lock.acquire()
        db_result = self.db.get(model_name='SpiderScheduleModel',
                                key_list=['hash_str', 'project', 'spider', 'schedule', 'args', 'runtime', 'status'])
        self.db_lock.release()
        self.ts_lock.release()
        schedule_list_raw = [
            {'hash_str': x.hash_str, 'project': x.project, 'spider': x.spider, 'schedule': x.schedule, 'args': x.args, 'runtime': x.runtime,
             'status': x.status}
            for x in db_result if int(x.status) != 0
        ] if db_result else []
        schedule_sta = False
        if schedule_list_raw:
            for each_schedule in schedule_list_raw:
                project = each_schedule.get('project')
                runtime = int(each_schedule.get('runtime'))
                if project in self.projects and runtime > 0:
                    schedule = each_schedule.get('schedule')

                    if any([x in schedule for x in self._keys_set]):
                        try:
                            schedule = json.loads(schedule)
                        except ValueError:
                            # not valid JSON; fall back to a Python-literal dict
                            schedule = eval(schedule)
                    try:
                        if isinstance(schedule, dict):
                            for key in list(schedule.keys()):
                                if key not in self._keys_set:
                                    mean_key = self._check_key(key)
                                    raise ValueError(
                                        'found "{}" in your schedule dict, maybe you mean "{}"'.format(key, mean_key))
                                if key in self._keys_dic:
                                    val = schedule.pop(key)
                                    schedule[self._keys_dic[key]] = val
                            next_time_sep = self.cal_time_sep(**schedule)
                        else:
                            next_time_sep = self.cal_time_sep(schedule_str=schedule, is_str=True)
                        next_time_sep = int(next_time_sep) + 1
                        if next_time_sep > 1:
                            each_schedule['schedule'] = next_time_sep
                            item = '{}-{}'.format(each_schedule['project'], each_schedule['spider'])
                            self.ts_lock.acquire(blocking=True)
                            if self.spider_task_dic.get(item) != 'waiting':
                                self.spider_task_dic[item] = 'waiting'
                                t = threading.Thread(target=self.poster, args=(each_schedule,))
                                try:
                                    t.start()
                                except Exception as THError:
                                    self.schedule_logger.warn('start new job error [ {} ]: {}'.format(item, THError))
                            self.ts_lock.release()
                    except ValueError as V:
                        self.schedule_logger.error('spider runtime schedule error, please check the database: {}'.format(V))
            schedule_sta = True
        return schedule_sta

    def poster(self, dic):
        hash_str = dic.get('hash_str')
        status = int(dic.pop('status'))
        project = dic.get('project')
        spider = dic.get('spider')
        job_str = " %s-%s " % (project, spider)
        args = dic.get('args')
        try:
            args = json.loads(args)
        except ValueError:
            # not valid JSON; fall back to a Python-literal value
            args = eval(args)
        wait_time = dic.get('schedule')
        item = '{}-{}'.format(project, spider)
        if project and spider:
            data = {'project': project, 'spider': spider, 'un': self.user_name, 'pwd': self.user_password}
            if args:
                args = self._spider_args_method(args, hash_str)
                data.update(args)
            self.schedule_logger.info('job {} is waiting, countdown {}s'.format(item, wait_time))
            time.sleep(wait_time - 1)
            another_wait_time = 0
            spider_runtime_avg = self.spiders_runtime(project=project, spider=spider)
            if status == 1:
                while not self.is_system_ok():
                    self.schedule_logger.warn('system is under heavy load, waiting another 2 seconds to post schedule')
                    time.sleep(2)
                    another_wait_time += 2
                    if another_wait_time >= (wait_time - spider_runtime_avg):
                        self.schedule_logger.warn('waited too long, cancelling job %s' % job_str)
                        return None
                res = json.loads(requests.post(url=self.schedule_post_url, data=data).content)
            else:
                # statuses 2, 3, and any other non-zero value post immediately
                res = json.loads(requests.post(url=self.schedule_post_url, data=data).content)
            spider_status = res.get('status')
            if spider_status != 'ok':
                spider_status = 'error'
        else:
            self.schedule_logger.error('job project: {}, spider: {} post fail!'.format(project, spider))
            spider_status = 'error'
        self.ts_lock.acquire(blocking=True)
        if spider_status == 'ok':
            self._run_countdown(project=project, spider=spider)
        self.spider_task_dic[item] = spider_status
        self.ts_lock.release()

    def _spider_args_method(self, args, hash_str):
        args_raw = args.copy()
        if args:
            method = args.pop('method', 'normal')
            if method == 'auto_increment':
                next_args = {k: str(int(v)+1) if isinstance(v, int) or (isinstance(v, str) and v.isdigit()) else v for k, v in args.items()}
            elif isinstance(method, dict):
                next_args = args  # default if no expression is given
                ex_md = method.get('expression')
                fc_md = method.get('function')
                if ex_md:
                    next_args = eval(ex_md)
                if fc_md:
                    exec(fc_md)
            else:
                next_args = args
            next_args.update({'method': method})
            self.db.update('SpiderScheduleModel', update_dic={'args': next_args}, filter_dic={"hash_str": hash_str})
            return args
        return args_raw

    def spiders_runtime(self, project, spider):
        self.db_lock.acquire()
        res = self.db.get(model_name='SpiderMonitor', key_list=['runtime'],
                          filter_dic={'project': project, 'spider': spider})
        self.db_lock.release()
        spider_list = [int(x.runtime) for x in res if x.runtime.isdigit()] if res else []
        return sum(spider_list) / len(spider_list) if spider_list else 0

    def list_projects(self):
        res = requests.get(url=self.listproject_url)
        projects = set()
        if res:
            projects_list = json.loads(res.content).get('projects')
            if projects_list:
                projects = set(projects_list)
        return projects

    def cal_time_sep(self,
                     year='*',
                     month='*',
                     day='*',
                     week='*',
                     hour='*',
                     minute='*',
                     second='*',
                     schedule_str=None,
                     is_str=False
                     ):
        """
            "%Y-%m-%d %H:%M:%S %w"

        """
        if is_str:
            s = [int(x.strip()) for x in schedule_str.split(',')]
            time_sep = (datetime.datetime(s[0], s[1], s[2], s[3], s[4], s[5]) - datetime.datetime.now()).total_seconds()
            return time_sep
        y = int(time.strftime("%Y", time.localtime()))
        if year != '*' and '*' in year:
            y = int(year.split('/')[-1]) + y
        elif year.isdigit():
            y = int(year)

        if week == '*':
            m = int(time.strftime("%m", time.localtime()))
            if month != '*' and '*' in month:
                m_raw = int(month.split('/')[-1])
                if m_raw >= 12:
                    raise ValueError('month value is too large, please set the year instead')
                m = m_raw + m
                if m > 12:
                    y += m // 12
                    m = m % 12
            elif month.isdigit():
                m = int(month)

            days_in_this_month = self.how_many_days_in_this_month(y, m)
            d = int(time.strftime("%d", time.localtime()))
            if day != '*' and '*' in day:
                d_raw = int(day.split('/')[-1])
                if d_raw > days_in_this_month:
                    raise ValueError('day value is too large, please set the month or the year instead')
                d = d_raw + d
                if d > days_in_this_month:
                    d = d - days_in_this_month
                    m += 1
                    if m > 12:
                        y += 1
                        m = m - 12
            elif day.isdigit():
                d = int(day)

            days_in_this_month = self.how_many_days_in_this_month(y, m)
            H = int(time.strftime("%H", time.localtime()))
            if hour != '*' and '*' in hour:
                H_raw = int(hour.split('/')[-1])
                if H_raw > 24:
                    raise ValueError('hour value is too large, please set the day instead')
                H = H_raw + H
                if H >= 24:
                    H = H - 24
                    d += 1
                    if d > days_in_this_month:
                        d = d - days_in_this_month
                        m += 1
                        if m > 12:
                            y += 1
                            m = m - 12
            elif hour.isdigit():
                H = int(hour)

            days_in_this_month = self.how_many_days_in_this_month(y, m)
            M = int(time.strftime("%M", time.localtime()))
            if minute != '*' and '*' in minute:
                M_raw = int(minute.split('/')[-1])
                if M_raw > 60:
                    raise ValueError('minute value is too large, please set the hour instead')
                M = M_raw + M
                if M >= 60:
                    M = M - 60
                    H += 1
                    if H >= 24:
                        H = H - 24
                        d += 1
                        if d > days_in_this_month:
                            d = d - days_in_this_month
                            m += 1
                            if m > 12:
                                y += 1
                                m = m - 12
            elif minute.isdigit():
                M = int(minute)

            days_in_this_month = self.how_many_days_in_this_month(y, m)
            S = int(time.strftime("%S", time.localtime()))
            if second != '*' and '*' in second:
                S_raw = int(second.split('/')[-1])
                if S_raw > 60:
                    raise ValueError('second value is too large, please set the minute instead')
                S = S_raw + S
                if S >= 60:
                    S = S - 60
                    M += 1
                    if M >= 60:
                        M = M - 60
                        H += 1
                        if H >= 24:
                            H = H - 24
                            d += 1
                            if d > days_in_this_month:
                                d = d - days_in_this_month
                                m += 1
                                if m > 12:
                                    y += 1
                                    m = m - 12
            elif second.isdigit():
                S = int(second)
            time_sep = (datetime.datetime(y, m, d, H, M, S) -
                        datetime.datetime.now()).total_seconds()

        else:
            week_in_this_year = int(time.strftime("%U", time.localtime()))
            w = int(time.strftime("%w", time.localtime()))
            if '*' in week:
                w_raw = int(week.split('/')[-1])
                if w_raw >= 7:
                    raise ValueError('week value is too large, please set the day or the month instead')
                if w_raw < w:
                    week_in_this_year += 1
                w = w_raw
                if week_in_this_year > 53:
                    y += 1
                    week_in_this_year = week_in_this_year - 53

            elif week.isdigit():
                if int(week) < w:
                    week_in_this_year += 1
                w = int(week)

            H = int(time.strftime("%H", time.localtime()))
            if hour != '*' and '*' in hour:
                H_raw = int(hour.split('/')[-1])
                if H_raw >= 24:
                    raise ValueError('hour value is too large, please set the day instead')
                H = H_raw + H
                if H >= 24:
                    H = H - 24
                    w += 1
                    if w >= 7:
                        w = w - 7
                        week_in_this_year += 1
                        if week_in_this_year > 53:
                            y += 1
                            week_in_this_year = week_in_this_year - 53
            elif hour.isdigit():
                H = int(hour)

            M = int(time.strftime("%M", time.localtime()))
            if minute != '*' and '*' in minute:
                M_raw = int(minute.split('/')[-1])
                if M_raw >= 60:
                    raise ValueError('minute value is too large, please set the hour instead')
                M = M_raw + M
                if M >= 60:
                    M = M - 60
                    H += 1
                    if H >= 24:
                        H = H - 24
                        w += 1
                        if w > 7:
                            w = w - 7
                            week_in_this_year += 1
                            if week_in_this_year > 53:
                                y += 1
                                week_in_this_year = week_in_this_year - 53
            elif minute.isdigit():
                M = int(minute)

            S = int(time.strftime("%S", time.localtime()))
            if second != '*' and '*' in second:
                S_raw = int(second.split('/')[-1])
                if S_raw >= 60:
                    raise ValueError('second value is too large, please set the minute instead')
                S = S_raw + S
                if S >= 60:
                    S = S - 60
                    M += 1
                    if M >= 60:
                        M = M - 60
                        H += 1
                        if H >= 24:
                            H = H - 24
                            w += 1
                            if w > 7:
                                w = w - 7
                                week_in_this_year += 1
                                if week_in_this_year > 53:
                                    y += 1
                                    week_in_this_year = week_in_this_year - 53
            elif second.isdigit():
                S = int(second)
            if S >= 60:
                S = S - 60
                M += 1
                if M >= 60:
                    M = M - 60
                    H += 1
                    if H >= 24:
                        H = H - 24
                        w += 1
                        if w > 7:
                            w = w - 7
                            week_in_this_year += 1
                            if week_in_this_year > 53:
                                y += 1
                                week_in_this_year = week_in_this_year - 53
            m, d = self.get_month_and_days_by_week(year=y, week_in_this_year=week_in_this_year, week=w)
            time_sep = (datetime.datetime(y, m, d, H, M, S) -
                        datetime.datetime.now()).total_seconds()

        return time_sep

    def get_month_and_days_by_week(self, year, week_in_this_year, week):
        days = week_in_this_year * 7 + week
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            Fe = 29
        else:
            Fe = 28
        month_lis = [31, Fe, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
        month_count = 1
        days_count = 0
        for month_days in month_lis:
            days = days - month_days
            if days > 0:
                month_count += 1
            elif days == 0:
                # exactly consumed this month: the day is its last day
                days_count = month_days
                break
            else:
                days_count = days + month_days
                break
        return [month_count, days_count]

    def how_many_days_in_this_month(self, y, m):
        if m in (1, 3, 5, 7, 8, 10, 12):
            days = 31
        elif m in (4, 6, 9, 11):
            days = 30
        else:
            if (y % 4 == 0 and y % 100 != 0) or (y % 400 == 0):
                days = 29
            else:
                days = 28
        return days
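    # Note: the standard library gives the same count via
    # calendar.monthrange(y, m)[1]; the explicit table above avoids the import.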

    def is_system_ok(self):
        is_pass = True
        cpu_list = psutil.cpu_percent(interval=1, percpu=True)
        memory_percent = psutil.virtual_memory().percent
        if cpu_list and memory_percent:
            is_cpu_ok = True
            if min(cpu_list) > self.CPU_THRESHOLD:
                is_cpu_ok = False
            is_memo_ok = True
            if memory_percent > self.MEMORY_THRESHOLD:
                is_memo_ok = False
            if not is_cpu_ok or not is_memo_ok:
                is_pass = False
        return is_pass
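    # Note: is_system_ok() only reports "busy" when every CPU core exceeds
    # CPU_THRESHOLD (min() of the per-core percentages) or when memory usage
    # exceeds MEMORY_THRESHOLD, so a single saturated core does not block
    # scheduling.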

    def _check_key(self, key):
        key_lis = list(key)
        count_dic = dict()
        for ksl in self._keys_set_lis:
            o_key = ''.join(ksl)
            score = 0
            for k in key_lis:
                if k in ksl:
                    score += 1
            count_dic[o_key] = score
        best_match = sorted(count_dic, key=count_dic.__getitem__, reverse=True)[0]
        return best_match

    def _run_countdown(self, project, spider):
        db_schedule = self.db.get(model_name='SpiderScheduleModel', key_list=['id', 'runtime'],
                                  filter_dic={'project': project, 'spider': spider})
        run_time_in_db = int([x.runtime for x in db_schedule][0]) if db_schedule else 0
        the_id = [x.id for x in db_schedule][0] if db_schedule else None
        if run_time_in_db > 0 and the_id is not None:
            rt = run_time_in_db - 1
            self.db.update(model_name='SpiderScheduleModel', update_dic={"runtime": rt}, filter_dic={"id": the_id})
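
# A minimal sketch of the two schedule formats cal_time_sep() accepts;
# "scheduler" and the values below are hypothetical placeholders:
#
#     scheduler = TimeSchedule(lock=threading.Lock())
#     # Cron-like fields: "*/n" adds n to the current value, a bare digit pins it.
#     seconds = scheduler.cal_time_sep(hour="*/1", minute="30")
#     # Literal timestamp string: "year, month, day, hour, minute, second".
#     seconds = scheduler.cal_time_sep(
#         schedule_str="2030, 1, 1, 12, 0, 0", is_str=True)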
Esempio n. 15
0
class BlockchainInterface:
    """
    Interacts with a solidity compiler and a registry in order to instantiate compiled
    ethereum contracts with the given web3 provider backend.
    """

    TIMEOUT = 180  # seconds
    NULL_ADDRESS = '0x' + '0' * 40

    _instance = NO_BLOCKCHAIN_CONNECTION.bool_value(False)
    process = NO_PROVIDER_PROCESS.bool_value(False)
    Web3 = Web3

    _contract_factory = Contract

    class InterfaceError(Exception):
        pass

    class NoProvider(InterfaceError):
        pass

    class ConnectionFailed(InterfaceError):
        pass

    class UnknownContract(InterfaceError):
        pass

    def __init__(self,
                 poa: bool = True,
                 provider_process: NuCypherGethProcess = NO_PROVIDER_PROCESS,
                 provider_uri: str = NO_BLOCKCHAIN_CONNECTION,
                 transacting_power: TransactingPower = READ_ONLY_INTERFACE,
                 provider: Web3Providers = NO_BLOCKCHAIN_CONNECTION,
                 registry: EthereumContractRegistry = None):
        """
        A blockchain "network interface"; The circumflex wraps entirely around the bounds of
        contract operations including compilation, deployment, and execution.

         Architecture sketch (Filesystem / Configuration / Node / Client / EVM):

         Solidity Files -> SolidityCompiler and Registry File -> ContractRegistry
         feed the *Blockchain* node, which reaches an external EVM (geth,
         parity, ...) through an HTTPProvider or IPCProvider, or an
         EthereumTester / PyEVM development chain through a TestProvider.
         Above it, Key and Config Files -> CharacterConfiguration -> Agent
         (Contract API) -> Actor (Blockchain-Character API) -> Character
         (Public API) -> Human.


        The Blockchain is the junction of the solidity compiler, a contract registry, and a collection of
        web3 network providers as a means of interfacing with the ethereum blockchain to execute
        or deploy contract code on the network.


        Compiler and Registry Usage
        -----------------------------

        Contracts are freshly re-compiled if an instance of SolidityCompiler is passed; otherwise,
        the registry will read contract data saved to disk, which is used to retrieve contract
        addresses and op-codes. Optionally, a registry instance can be passed instead.


        Provider Usage
        ---------------
        https://github.com/ethereum/eth-tester#available-backends


        * HTTP Provider - Web3 HTTP provider, typically JSON RPC 2.0 over HTTP
        * Websocket Provider - Web3 WS provider, typically JSON RPC 2.0 over WS, supply endpoint uri and websocket=True
        * IPC Provider - Web3 File based IPC provider transported over standard I/O
        * Custom Provider - A pre-initialized web3.py provider instance to attach to this interface

        """

        self.log = Logger('Blockchain')
        self.poa = poa
        self.provider_uri = provider_uri
        self._provider = provider
        self._provider_process = provider_process
        self.client = NO_BLOCKCHAIN_CONNECTION
        self.transacting_power = transacting_power
        self.registry = registry
        BlockchainInterface._instance = self

    def __repr__(self):
        r = '{name}({uri})'.format(name=self.__class__.__name__,
                                   uri=self.provider_uri)
        return r

    @classmethod
    def from_dict(cls, payload: dict, **overrides) -> 'BlockchainInterface':

        # Apply overrides
        payload.update({k: v for k, v in overrides.items() if v is not None})

        registry = EthereumContractRegistry(
            registry_filepath=payload['registry_filepath'])
        blockchain = cls(provider_uri=payload['provider_uri'],
                         registry=registry)
        return blockchain

    def to_dict(self) -> dict:
        payload = dict(provider_uri=self.provider_uri,
                       poa=self.poa,
                       registry_filepath=self.registry.filepath)
        return payload

    def _configure_registry(self, fetch_registry: bool = True) -> None:
        RegistryClass = EthereumContractRegistry._get_registry_class(
            local=self.client.is_local)
        if fetch_registry:
            registry = RegistryClass.from_latest_publication()
        else:
            registry = RegistryClass()
        self.registry = registry
        self.log.info("Using contract registry {}".format(
            self.registry.filepath))

    @property
    def is_connected(self) -> bool:
        """
        https://web3py.readthedocs.io/en/stable/providers.html#examples-using-automated-detection
        """
        if self.client is NO_BLOCKCHAIN_CONNECTION:
            return False
        return self.client.is_connected

    def disconnect(self) -> None:
        if self._provider_process:
            self._provider_process.stop()
        self._provider_process = NO_PROVIDER_PROCESS
        self._provider = NO_BLOCKCHAIN_CONNECTION
        BlockchainInterface._instance = NO_BLOCKCHAIN_CONNECTION

    @classmethod
    def reconnect(cls, *args, **kwargs) -> 'BlockchainInterface':
        return cls._instance

    def attach_middleware(self):

        # For use with Proof-Of-Authority test-blockchains
        if self.poa is True:
            self.log.debug('Injecting POA middleware at layer 0')
            self.client.inject_middleware(geth_poa_middleware, layer=0)

    def connect(self, fetch_registry: bool = True, sync_now: bool = False):

        # Spawn child process
        if self._provider_process:
            self._provider_process.start()
            provider_uri = self._provider_process.provider_uri(scheme='file')
        else:
            provider_uri = self.provider_uri
            self.log.info(
                f"Using external Web3 Provider '{self.provider_uri}'")

        # Attach Provider
        self._attach_provider(provider=self._provider,
                              provider_uri=provider_uri)
        self.log.info("Connecting to {}".format(self.provider_uri))
        if self._provider is NO_BLOCKCHAIN_CONNECTION:
            raise self.NoProvider(
                "There are no configured blockchain providers")

        # Connect Web3 Instance
        try:
            self.w3 = self.Web3(provider=self._provider)
            self.client = Web3Client.from_w3(w3=self.w3)
        except requests.ConnectionError:  # RPC
            raise self.ConnectionFailed(
                f'Connection Failed - {str(self.provider_uri)} - is RPC enabled?'
            )
        except FileNotFoundError:  # IPC File Protocol
            raise self.ConnectionFailed(
                f'Connection Failed - {str(self.provider_uri)} - is IPC enabled?'
            )
        else:
            self.attach_middleware()

        # Wait for chaindata sync
        if sync_now:
            self.client.sync()

        # Establish contact with NuCypher contracts
        if not self.registry:
            self._configure_registry(fetch_registry=fetch_registry)

        return self.is_connected

    @property
    def provider(self) -> Union[IPCProvider, WebsocketProvider, HTTPProvider]:
        return self._provider

    def _attach_provider(self,
                         provider: Web3Providers = None,
                         provider_uri: str = None) -> None:
        """
        https://web3py.readthedocs.io/en/latest/providers.html#providers
        """

        if not provider_uri and not provider:
            raise self.NoProvider("No URI or provider instances supplied.")

        if provider_uri and not provider:
            uri_breakdown = urlparse(provider_uri)

            if uri_breakdown.scheme == 'tester':
                providers = {
                    'pyevm': _get_tester_pyevm,
                    'geth': _get_test_geth_parity_provider,
                    'parity-ethereum': _get_test_geth_parity_provider,
                }
                provider_scheme = uri_breakdown.netloc
            else:
                providers = {
                    'auto': _get_auto_provider,
                    'infura': _get_infura_provider,
                    'ipc': _get_IPC_provider,
                    'file': _get_IPC_provider,
                    'ws': _get_websocket_provider,
                    'http': _get_HTTP_provider,
                    'https': _get_HTTP_provider,
                }
                provider_scheme = uri_breakdown.scheme
            try:
                self._provider = providers[provider_scheme](provider_uri)
            except KeyError:
                raise ValueError(
                    f"{provider_uri} is an invalid or unsupported blockchain provider URI"
                )
            else:
                self.provider_uri = provider_uri or NO_BLOCKCHAIN_CONNECTION
        else:
            self._provider = provider

    def send_transaction(
        self,
        contract_function: ContractFunction,
        sender_address: str,
        payload: dict = None,
    ) -> dict:

        if self.transacting_power is READ_ONLY_INTERFACE:
            raise self.InterfaceError(str(READ_ONLY_INTERFACE))

        #
        # Build
        #

        if not payload:
            payload = {}

        nonce = self.client.w3.eth.getTransactionCount(sender_address)
        payload.update({
            'chainId': int(self.client.net_version),
            'nonce': nonce,
            'from': sender_address,
            'gasPrice': self.client.gas_price,
            # 'gas': 0,  # TODO: Gas Management
        })

        # Get interface name
        deployment = isinstance(contract_function, ContractConstructor)

        try:
            transaction_name = contract_function.fn_name.upper()
        except AttributeError:
            if deployment:
                transaction_name = 'DEPLOY'
            else:
                transaction_name = 'UNKNOWN'

        payload_pprint = dict(payload)
        payload_pprint['from'] = to_checksum_address(payload['from'])
        payload_pprint = ', '.join("{}: {}".format(k, v)
                                   for k, v in payload_pprint.items())
        self.log.debug(f"[TX-{transaction_name}] | {payload_pprint}")

        # Build transaction payload
        try:
            unsigned_transaction = contract_function.buildTransaction(payload)
        except ValidationError as e:
            # TODO: Handle validation failures for gas limits, invalid fields, etc.
            self.log.warn(f"Validation error: {e}")
            raise
        else:
            if deployment:
                self.log.info(
                    f"Deploying contract: {len(unsigned_transaction['data'])} bytes"
                )

        #
        # Broadcast
        #

        signed_raw_transaction = self.transacting_power.sign_transaction(
            unsigned_transaction)
        txhash = self.client.send_raw_transaction(signed_raw_transaction)

        try:
            receipt = self.client.wait_for_receipt(txhash,
                                                   timeout=self.TIMEOUT)
        except TimeExhausted:
            # TODO: Handle transaction timeout
            raise
        else:
            self.log.debug(
                f"[RECEIPT-{transaction_name}] | txhash: {receipt['transactionHash'].hex()}"
            )

        #
        # Confirm
        #

        # Primary check
        deployment_status = receipt.get('status', UNKNOWN_TX_STATUS)
        if deployment_status == 0:
            failure = f"Transaction transmitted, but receipt returned status code 0. " \
                      f"Full receipt: \n {pprint.pformat(receipt, indent=2)}"
            raise self.InterfaceError(failure)

        if deployment_status is UNKNOWN_TX_STATUS:
            self.log.info(
                f"Unknown transaction status for {txhash} (receipt did not contain a status field)"
            )

            # Secondary check TODO: Is this a sensible check?
            tx = self.client.get_transaction(txhash)
            if tx["gas"] == receipt["gasUsed"]:
                raise self.InterfaceError(
                    f"Transaction consumed 100% of transaction gas. "
                    f"Full receipt: \n {pprint.pformat(receipt, indent=2)}")

        return receipt

    def get_contract_by_name(
            self,
            name: str,
            proxy_name: str = None,
            use_proxy_address: bool = True) -> Union[Contract, List[tuple]]:
        """
        Instantiate a deployed contract from registry data,
        and assimilate it with its proxy if it is upgradeable,
        or return all registered records if use_proxy_address is False.
        """
        target_contract_records = self.registry.search(contract_name=name)

        if not target_contract_records:
            raise self.UnknownContract(
                f"No such contract records with name {name}.")

        if proxy_name:  # It's upgradeable
            # Lookup proxies; Search for a published proxy that targets this contract record

            proxy_records = self.registry.search(contract_name=proxy_name)

            results = list()
            for proxy_name, proxy_addr, proxy_abi in proxy_records:
                proxy_contract = self.client.w3.eth.contract(
                    abi=proxy_abi,
                    address=proxy_addr,
                    ContractFactoryClass=self._contract_factory)

                # Read this dispatcher's target address from the blockchain
                proxy_live_target_address = proxy_contract.functions.target().call()
                for target_name, target_addr, target_abi in target_contract_records:

                    if target_addr == proxy_live_target_address:
                        if use_proxy_address:
                            pair = (proxy_addr, target_abi)
                        else:
                            pair = (proxy_live_target_address, target_abi)
                    else:
                        continue

                    results.append(pair)

            if not results:
                raise self.UnknownContract(
                    "No {} proxy records target contract {}".format(proxy_name, name))

            if len(results) > 1:
                address, abi = results[0]
                message = "Multiple {} deployments are targeting {}".format(
                    proxy_name, address)
                raise self.InterfaceError(message)

            selected_address, selected_abi = results[0]

        else:  # It's not upgradeable
            if len(target_contract_records) != 1:
                m = "Multiple records registered for non-upgradeable contract {}"
                raise self.InterfaceError(m.format(name))
            _target_contract_name, selected_address, selected_abi = target_contract_records[
                0]

        # Create the contract from selected sources
        unified_contract = self.client.w3.eth.contract(
            abi=selected_abi,
            address=selected_address,
            ContractFactoryClass=self._contract_factory)

        return unified_contract
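
# A minimal connection sketch; the provider URI and contract name below are
# hypothetical placeholders:
#
#     interface = BlockchainInterface(provider_uri="http://localhost:8545")
#     interface.connect(fetch_registry=False, sync_now=False)
#     if interface.is_connected:
#         contract = interface.get_contract_by_name("ExampleContract")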
Esempio n. 16
0
class Controller(service.MultiService):
    targets = {}
    services = []
    binary_light_list = []
    hvac_list = []
    media_player_list = []
    messager = None
    server_list = []
    shutter_list = []
    camera_list = []
    multiscreen_list = []
    dimmable_light_list = []
    ambi_light_list = []
    event_catcher = None
    cloud_event_catcher = None
    subscriptions = {}
    subscriptions_cloud = {}
    searchables = {}
    ready_to_close = False
    current_device = None
    cloud = False
    lan = False
    agent = None

    def __init__(
            self, parent=None, searchables=None, xmldir=None,
            network='lan', cloud_user=None, cloud_servers=[],
            logger=None, uid=None, messager=None):
        self.connected = False
        self.messager = messager
        self.app_paused = False
        self.fail_count = 0
        if not logger:
            self.log = Logger()
        else:
            self.log = logger
        self.log.debug('UPnP controller starts')
        self.xmldir = xmldir
        self.devices = {}
        self._services = {}
        self.parent = parent
#         self.amp = ControllerAmp(self)
        if uid:
            self.uuid = uid
        else:
            self.uuid = str(
                uuid.uuid5(
                    uuid.NAMESPACE_DNS,
                    socket.gethostname() + 'onDemand_Controller'))
        if searchables:
            for typ in searchables:
                self.searchables.update({typ[0]: typ[1]})
#                 print(self.searchables)
        else:
            self.searchables = {'upnp:rootdevice': self.log.debug}
        if network in ('lan', 'both'):
            self.log.debug('UPnP classic enabled')
            self.lan = True
            self.listener = ssdp.SSDP_Listener(self)
            self.mcast = internet.MulticastServer(  # @UndefinedVariable
                SSDP_PORT,
                self.listener,
                listenMultiple=True,
                interface=SSDP_ADDR_V4)
            self.mcast.setServiceParent(self)
            self.ssdp_cli = ssdp.SSDP_Client(
                self, get_default_v4_address(), device=False)
            self.ucast = internet.UDPServer(  # @UndefinedVariable
                0, self.ssdp_cli, self.ssdp_cli.interface)
            self.ucast.setServiceParent(self)
#             self.agent = Agent(reactor)
        if network in ('cloud', 'both'):
            if cloud_user:
                self.log.debug('UPnP Cloud enabled')
                self.cloud = True
                self._jid, secret = cloud_user
                self.users = {self._jid: {'state': True}}
                for user in cloud_servers:
                    self.users.update({user: {'state': False}})
                self.hosts = {}
                self.resourcepart = ''.join((
                    'urn:schemas-upnp-org:cloud-1-0:ControlPoint:1:uuid:',
                    self.uuid))
                full_jid = ''.join(
                    (self._jid, '/', self.resourcepart))
                self.jid = jid = JID(full_jid)
                self.reactor = reactor
                f = client.XMPPClientFactory(jid, secret)
                f.addBootstrap(
                    xmlstream.STREAM_CONNECTED_EVENT, self.cloud_connected)
                f.addBootstrap(
                    xmlstream.STREAM_END_EVENT, self.cloud_disconnected)
                f.addBootstrap(
                    xmlstream.STREAM_AUTHD_EVENT, self.authenticated)
                f.addBootstrap(
                    xmlstream.INIT_FAILED_EVENT, self.cloud_failed)
                self.connector = endpoints.HostnameEndpoint(
                    reactor, jid.host, 5222)
                self.factory = f
#             factory = Factory()
#             factory.protocol = ControllerAmp(self)
#             amp_service = internet.TCPServer(  # @UndefinedVariable
#                 4343, factory)
#             amp_service.setServiceParent(self)
#                 self.connector = SRVConnector(
#                     reactor, 'xmpp-client', jid.host, f, defaultPort=5222)
#         log.startLogging(sys.stdout)

    def startService(self):
        '''
        Start the cloud connector and/or the periodic LAN device search.
        '''
        service.MultiService.startService(self)
        if self.cloud:
            self.connector.connect(self.factory)
            self.log.debug('Cloud Service started')
        if self.lan:
            t = task.LoopingCall(self.search_devices)
            t.start(15)
            self.log.debug('SSDP Service started')

    def resume(self):
        self.app_paused = False
        if not self.connected:
            if self.cloud:
                self.connector.connect(self.factory)
                self.log.debug('Cloud Service started')
            if self.lan:
                t = task.LoopingCall(self.search_devices)
                t.start(15)
                self.log.debug('SSDP Service started')

    def stopService(self):
        self.log.debug('Stopping controller service...')
        self.clean()
#         d.addCallback(lambda ignored: service.MultiService.stopService(self))
        service.MultiService.stopService(self)
#         reactor.callLater(10, reactor.stop)  # @UndefinedVariable

    def cloud_disconnected(self, reason):
        if not reason:
            reason = 'Unknown'
        self.log.warn('Cloud Server disconnected: %s' % reason)
        self.connected = False
        if not self.app_paused and self.fail_count < 10:
            self.fail_count += 1
            self.resume()

    def cloud_failed(self, failure):
        self.log.error('Cloud Login failed: %s' % str(failure))

#         self.xmlstream.sendFooter()

    def clean(self):
        return reactor.callInThread(  # @UndefinedVariable
            threads.blockingCallFromThread, *(reactor, self.cleanfunc))

    def cleanfunc(self):

        def cleaned(res):
            self.log.debug('cleaned')
            if self.cloud:
                self.xmlstream.sendFooter()
        dl = []
        if self.lan:
            for name in list(self.subscriptions.keys()):
                dl.append(self.unsubscribe(name))
        if self.cloud:
            for name in list(self.subscriptions_cloud.keys()):
                dl.append(self.unsubscribe(name))
        d = defer.DeferredList(dl)
        d.addCallback(cleaned)
        return d

    def cloud_connected(self, xs):
        self.log.debug('Cloud Connected')
        self.fail_count = 0
        self.connected = True
        self._services = {}
        self.subscriptions = {}
        self.xmlstream = xs
#         xs.rawDataInFn = self.rawDataIn

    def authenticated(self, xs):

        self.log.debug('Cloud Authenticated')
        presence = domish.Element((None, 'presence'))
        xs.send(presence)
        xs.addObserver('/presence', self.on_presence)
        xs.addObserver('/iq', self.on_iq)
        xs.addObserver('/message', self.on_event)
        disco = IQ(xs, 'get')
        disco.addElement(('http://jabber.org/protocol/disco#items', 'query'))
        disco.addCallback(self.cloud_discovered)
        disco.send()
#         self.reactor.callLater(120, xs.sendFooter)
        self.reactor.callLater(5, self.check_users)

    def check_users(self):
        for user, value in self.users.items():
            if value['state'] is False:
                iq = IQ(self.xmlstream, 'set')
                query = domish.Element(('jabber:iq:roster', 'query'))
                item = domish.Element((None, 'item'))
                item['name'] = user
                item['jid'] = user
                item.addElement('group', content='hosts')
                query.addChild(item)
                iq.addChild(query)
                iq.addCallback(self.cloud_subscribe, user)
#                 print('send IQ: %s' % (iq.toXml().encode('utf-8')))
                iq.send()

    def cloud_subscribe(self, jid, result):
        self.log.debug('Subscribe callback from %s' % jid)
        presence = domish.Element((None, 'presence'))
        presence['type'] = 'subscribe'
        presence['to'] = jid
        self.xmlstream.send(presence)

    def on_event(self, message):
        if not self.cloud_event_catcher:
            reactor.callLater(1, self.on_event, message)  # @UndefinedVariable
            return
        if message.name == 'iq':
            if message['type'] == 'result':
                last = None
                try:
                    for child in message.children[0].children[0].children:
                        last = child.children[0]
                except (KeyError, IndexError):
                    return
                if last is not None:
                    self.cloud_event_catcher.receive(
                        last.toXml().encode('utf-8'))
        elif message.children[0].name == 'event':
            evt = message.children[0]
            items = evt.children[0]
            node_name = str(items['node'])
            if node_name in self.subscriptions_cloud:
                for item in items.children:
                    propertyset = item.children[0]
                    self.cloud_event_catcher.receive(
                        (node_name, propertyset.toXml().encode('utf-8'),))

    def rawDataIn(self, buf):
        print("Device RECV: %s" % buf.decode('utf-8', 'replace'))

    def on_presence(self, resp):
        self.log.debug('got presence: %s' % resp.toXml().encode('utf-8'))
#         print('from :%s' % resp['from'])
        user, host, res = parse(resp['from'])
        jid = '@'.join((user, host))
        if resp.hasAttribute('type'):
            if resp['type'] == 'subscribed':
                if jid in self.users:
                    self.users[jid].update({'state': True})
                    if 'services' in self.users[jid]:
                        self.users[jid]['services'].append(res)
                    else:
                        self.users[jid].update({'services': [res]})
                    presence = domish.Element((None, 'presence'))
                    presence['type'] = 'subscribe'
                    presence['to'] = resp['from']
                    self.xmlstream.send(presence)
                else:
                    presence = domish.Element((None, 'presence'))
                    # 'unsubscribed' is how a presence subscription is
                    # denied in XMPP ('denying' is not a valid type)
                    presence['type'] = 'unsubscribed'
                    presence['to'] = resp['from']
                    self.xmlstream.send(presence)
            elif resp['type'] == 'unsubscribed':
                if jid in self.users:
                    self.log.warn('subscription failed: %s' % resp['from'])
                return
        for child in resp.elements():
            if child.name == 'ConfigIdCloud':
                self.log.debug('Found UPnP Cloud device : %s type is: %s' % (
                    jid,
                    res))
                info = IQ(self.xmlstream, 'get')
#                 info['to'] = resp['from']
                query = domish.Element(
                    ('urn:schemas-upnp-org:cloud-1-0', 'query'))
                query['type'] = 'description'
                query['name'] = ':'.join(res.split(':')[-2:])
                info.addChild(query)
                info.addCallback(self.on_description, res)
#                 info.send()
                info.send(to=resp['from'])

    def on_description(self, resource, iq):
        location = iq['from']
        clbk = next(iter(self.searchables.values()))  # first registered callback
        if iq['type'] == 'result':
            if iq.children[0].name == 'query'\
                    and iq.children[0]['type'] == 'described':
                self.update_devices(
                    resource,
                    location,
                    clbk,
                    xml=iq.children[0].children[0].toXml())

    def cloud_discovered(self, iq):
        self.log.debug('Discovered item: %s' % iq.toXml().encode('utf-8'))
        if iq['type'] == 'result':
            for child in iq.children:
                if child.name == 'query':
                    for grandchild in child.children:
                        if grandchild['jid'].encode('utf-8') == self.full_jid:
                            continue
                        if grandchild['name'].encode('utf-8')\
                                in self.hosts:
                            self.hosts[
                                grandchild['name'].encode('utf-8')].append(
                                    grandchild['jid'].encode('utf-8'))
                        else:
                            self.hosts.update(
                                {grandchild['name'].encode('utf-8'):
                                    [grandchild['jid'].encode('utf-8')]})
#         print(self.hosts)

    def on_iq(self, iq):
        pass
#         print('got iq: %s' % iq.toXml())
#         try:
#             print('from :%s' % iq['from'])
#         except KeyError:
#             print('From I don\'t know: %s' % iq.toXml())
#         print('type: %s' % iq['type'])

    def search_devices(self):
        for search in self.searchables:
            self.ssdp_cli.send_MSEARCH(search, uuid=self.uuid)

    def update_hosts(self, host, unicast=False):

        if 'location' in host:
            if 'usn' in host:
                if host['usn'] in self.devices:
                    return
                device = host['usn'].split('::')
                if len(device) > 1:
                    uid = device[0].split(':')[1]
                    if uid in self.devices:
                        return
                    typ = device[1]
                    if typ in self.searchables:
                        self.update_devices(
                            uid, host['location'], self.searchables[typ])
#                         self.devices.append(uid)

    def send_message(self, message_type, name, id_, value):
        if self.messager:
            if isinstance(value, dict):
                self.messager.callRemote(message_type,
                                         name=name,
                                         id_=id_,
                                         value=json.dumps(value))

#                 for v in value.iteritems():
#                     if not v or isinstance(v, dict):
#                         print('zap')
#                         continue
#                     print(v)
#                     self.messager.callRemote(message_type,
#                                              name=name,
#                                              id_=id_,
#                                              value=':'.join((k, v)))
            else:
                self.messager.callRemote(message_type,
                                         name=name,
                                         id_=id_,
                                         value=value)

    def update_devices(self, uid, location, callback_fct, xml=None):
        def device_parsed(dic):
            self.devices.update(dic)
            if callable(callback_fct):
                callback_fct(dic)
            else:
                self.send_message(Event, callback_fct, uid, dic[uid])
                if self.messager:
                    self.messager.parent.notify(
                        'New Device detected:', dic[uid]['name'])
        uid = bytes(uid)
        self.log.debug('new device %s: %s' % (uid, location))
        if '@' in location:
            if xml:
                device_parsed(self.parse_host(xml, location, uid))
            return
        if not self.agent:
            self.agent = Agent(reactor)
        d = self.agent.request('GET', location)
        d.addCallback(readBody)
        d.addCallback(self.parse_host, location, uid)
        d.addCallback(device_parsed)

    def parse_host(self, xml, location, uid):
        typ = 'upnp'
        loc = None
        if '@' in location:
            url_prefix = ''.join(('xmpp://', location))
            net = 'cloud'
        else:
            url_prefix = urlparse(location).netloc
            net = 'lan'
        try:
            root = et.fromstring(xml)
        except Exception:
            self.log.error('bad xml: %s' % xml)
            return {}
        host = {}
        icon = None
        # defaults, in case the description XML lacks these fields
        fname = devtype = None
        svc = {}
        for children in root:
            if children.tag.split('}')[-1] == 'device':
                for att in children:
                    if att.tag.split('}')[-1] == 'friendlyName':
                        fname = att.text
                    if att.tag.split('}')[-1] == 'deviceType':
                        devtype = att.text
                        if 'Source' in att.text:
                            typ = 'oh'
                    if att.tag.split('}')[-1] == 'iconList':
                        for ico in att:
                            #  log.debug(ico)
                            for info in ico:
                                if info.tag.split('}')[-1] == 'width':
                                    if int(info.text) <= 96:
                                        if ico[4].text.startswith('/'):
                                            icon = 'http://'\
                                                + url_prefix\
                                                + ico[4].text
                                        else:
                                            icon = ico[4].text
                    if att.tag.split('}')[-1] == 'serviceList':
                        svc = {}
                        for serv in att:
                            d = {}
                            for info in serv:
                                if 'URL' in info.tag.split('}')[-1]:
                                    if net == 'lan':
                                        d.update({info.tag.split('}')[-1]:
                                                  'http://' +
                                                  url_prefix + info.text})
                                    else:
                                        d.update(
                                            {info.tag.split('}')[-1]:
                                             url_prefix + info.text})
                                else:
                                    d.update(
                                        {info.tag.split('}')[-1]: info.text})
                            svc.update({d['serviceType']: d})
                    if att.tag.split('}')[-1] == 'X_location':
                        loc = att.text
        host.update(
            {uid: {
                'name': fname,
                'devtype': devtype,
                'icon': icon,
                'services': svc,
                'type': typ,
                'network': net,
                'location': location,
                'loc': loc}})
#         log.debug(host)
        return host

    def subscribe(self, *args, **kwargs):
        device = args[0]
        first_device = next(iter(device.values()))
        if first_device['network'] == 'lan':
            return self.subscribe_classic(*args, **kwargs)
        else:
            return self.subscribe_cloud(*args, **kwargs)

    def subscribe_classic(
            self, device, svc, var, callback_fct=None,
            callback_args=()):
        if not callback_fct:
            callback_fct = self.log.debug
        name = next(iter(device))
        dev = device[name]

        def subscribe_failed(err, name):
            self.parent.remove_device(name.split('_')[0])

        def subscribed(req, raddr, host, name):
            try:
                uuid = req.headers.getRawHeaders('sid')[0]
                self.log.debug('subscription uuid = %s' % uuid)
                if name in self.subscriptions:
                    if host in self.subscriptions[name]:
                        self.subscriptions[name][host].update({uuid: raddr})
                    else:
                        self.subscriptions[name].update({host: {uuid: raddr}})
                else:
                    self.subscriptions.update({name: {host: {uuid: raddr}}})
                reactor.callLater(  # @UndefinedVariable
                    20, self.renew_subscription, uuid)
                return name
            except TypeError:
                return subscribe_failed(None, name)

        if self.event_catcher is None:
            self.event_catcher = EventServer()
            self.event_catcher.setServiceParent(self)
        subscription_id = '_'.join((name, svc.split(':')[-2]))
        childpath = '_'.join((subscription_id, 'event',))
#         log.error(childpath)
        if childpath in self.event_catcher.catcher.childs:
            self.event_catcher.catcher.childs[childpath].update(
                {var: (callback_fct, callback_args,)})
        else:
            self.event_catcher.catcher.childs.update(
                {childpath: {var: (callback_fct, callback_args,)}})
#         log.error(self.event_catcher.catcher.childs)
        if subscription_id in self.subscriptions:
            for k, value in self.event_catcher.catcher.unfiltered.items():
                if k == var:
                    if value == 'False':
                        value = False
                    elif value == 'True':
                        value = True
                    if isinstance(callback_args, str)\
                            or isinstance(callback_args, bool):
                        callback_fct(value, callback_args)
                    else:
                        callback_fct(value, *callback_args)
                    del self.event_catcher.catcher.unfiltered[k]
            return defer.succeed(None)
        else:
            self.subscriptions.update({subscription_id: {}})
        clbk = '<' + 'http://' + get_default_v4_address() + ':' +\
            str(self.event_catcher.getPort()) + '/' + childpath + '>'
#             print(clbk)
        headers = {'HOST': [get_default_v4_address() + ':' +
                            str(self.event_catcher.getPort())],
                   'CALLBACK': [clbk],
                   'NT': ['upnp:event'],
                   'TIMEOUT': ['Second-25']}
        if svc in dev['services']:
            self.log.debug(svc)
            addr = dev['services'][svc]['eventSubURL']
            self.log.debug(addr)
            d = self.agent.request(
                'SUBSCRIBE',
                addr,
                Headers(headers))
            d.addCallbacks(
                subscribed,
                subscribe_failed,
                callbackArgs=(addr, headers['HOST'][0], subscription_id),
                errbackArgs=(subscription_id,))
            return d
#         log.error(dev['services'])
        return defer.fail(Exception('Unknown service'))

    def renew_subscription(self, sid):

        def renewed(res):
            #             print('subscription %s successfully renewed' % sid)
            reactor.callLater(  # @UndefinedVariable
                20, self.renew_subscription, sid)

        def failed(res):
            for name in self.subscriptions:
                for host in self.subscriptions[name]:
                    if sid in self.subscriptions[name][host]:
                        del self.subscriptions[name][host][sid]
                        self.parent.remove_device(name.split('_')[0])
        for name in self.subscriptions:
            for host in self.subscriptions[name]:
                if sid in self.subscriptions[name][host]:
                    headers = {'HOST': [host], 'SID': [sid],
                               'TIMEOUT': ['Second-25']}
                    d = self.agent.request(
                        'SUBSCRIBE',
                        self.subscriptions[name][host][sid],
                        Headers(headers))
                    d.addCallbacks(renewed, failed)
                    return d

    def unsubscribe(self, name):
        self.log.debug('unsubscribe: %s' % name)
        deferreds = []
        if name in self.subscriptions:
            for host in self.subscriptions[name]:
                for sid in self.subscriptions[name][host]:
                    deferreds.append(self.unsubscribe_host(
                        sid,
                        host,
                        self.subscriptions[name][host][sid], name))
        if name in self.subscriptions_cloud:
            return self.unsubscribe_cloud(name)
        if len(deferreds) > 0:
            #             print(deferreds)
            d = defer.DeferredList(deferreds)
        else:
            d = defer.succeed('nothing to do')
        return d

    def unsubscribe_cloud(self, name):

        def unsubscribed(name, d, res):
            if res['type'] == 'result':
                del self.subscriptions_cloud[name]
                d.callback(None)
            else:
                d.errback(Exception(res.toXml()))

        d = defer.Deferred()
        iq = IQ(self.xmlstream, 'set')
        ps = domish.Element(('http://jabber.org/protocol/pubsub', 'pubsub'))
        unsubscribe = domish.Element((None, 'unsubscribe'))
        unsubscribe['node'] = name
        unsubscribe['jid'] = self.jid.full()
        ps.addChild(unsubscribe)
        iq.addChild(ps)
        iq.addCallback(unsubscribed, name, d)
        iq.send(to='pubsub.' + self.jid.host)
        return d

    def unsubscribe_host(self, sid, host, addr, name=None):
        #  log.debug(
        #     'unsubscribe uuid host addr: %s %s %s' % (sid, host, addr))

        def unsubscribed(res):
            #             print('subscription %s successfully cancelled' % sid)
            if name:
                if len(self.subscriptions[name][host]) == 1:
                    del self.subscriptions[name]
                else:
                    del self.subscriptions[name][host][sid]
            return res

        headers = {'HOST': [host], 'SID': [sid]}
        d = self.agent.request(
            'UNSUBSCRIBE',
            addr,
            Headers(headers))
        d.addCallback(unsubscribed)
        return d

    def subscribe_cloud(
            self, device, svc, var, callback_fct=None, callback_args=()):
        #         print('suscribe to %s' % var)
        name = next(iter(device))
        dev = device[name]
        if not callback_fct:
            callback_fct = self.log.debug
        d = defer.Deferred()

        def subscribe_failed(err, name):
            self.parent.remove_device(name.split('_')[0])

        def subscribed(node_name, deferred, iq):
            if iq['type'] == 'result':
                self.subscriptions_cloud[str(node_name)] = True
#                 print('%s suscribed !' % str(node_name))
#                 iq = IQ(self.xmlstream, 'get')
#                 ps = domish.Element(
#                     ('http://jabber.org/protocol/pubsub', 'pubsub'))
#                 items = domish.Element((None, 'items'))
#                 items['node'] = node_name
#                 items['max_items'] = '1'
#                 ps.addChild(items)
#                 iq.addChild(ps)
#                 iq.addCallback(self.on_event)
#                 iq.send(to='pubsub.' + self.jid.host)
#                 print(iq.toXml())
                deferred.callback(str(node_name))
            else:
                deferred.errback(Exception('subscription to %s failed: %s'
                                           % (node_name, iq.toXml())))

        if svc in dev['services']:
            #             print('service %s ok' % svc)
            #             print('subscriptions :%s' % self.subscriptions_cloud)
            if not self.cloud_event_catcher:
                self.cloud_event_catcher = CloudEventCatcher(
                    {}, {}, logger=self.log)
            subscription_name = '/'.join((dev['location'], svc, var))
            #  subscription_service = svc
            if subscription_name in self.cloud_event_catcher.callbacks:
                self.cloud_event_catcher.callbacks[subscription_name].update(
                    {var: (callback_fct, callback_args,)})
            else:
                self.cloud_event_catcher.callbacks.update(
                    {subscription_name: {var: (callback_fct, callback_args,)}})
#             if var in self.cloud_event_catcher.callbacks:
#                 self.cloud_event_catcher.callbacks[var].update(
#                     {var: (callback_fct, callback_args,)})
#             else:
#                 self.cloud_event_catcher.callbacks.update(
#                     {var: {var: (callback_fct, callback_args,)}})
    #         log.error(self.event_catcher.catcher.childs)
            if subscription_name in self.subscriptions_cloud:
                if self.subscriptions_cloud[subscription_name]:
                    #                     print('already subscribed: %s' % subscription_name)
                    for k, value in\
                            self.cloud_event_catcher.unfiltered_dict.items():
                        #                         print('is %s == %s ?' % (k, var))
                        if k == var:
                            if value == 'False':
                                value = False
                            elif value == 'True':
                                value = True
                            if isinstance(callback_args, str)\
                                    or isinstance(callback_args, bool):
                                callback_fct(value, callback_args)
                            else:
                                callback_fct(value, *callback_args)
                            del self.cloud_event_catcher.unfiltered_dict[k]
                    return defer.succeed(None)
            self.subscriptions_cloud.update({str(subscription_name): False})
#             print(subscription_name)
#             print(subscription_service)
            iq = IQ(self.xmlstream, 'set')
            ps = domish.Element(
                ('http://jabber.org/protocol/pubsub', 'pubsub'))
            subscribe = domish.Element((None, 'subscribe'))
            subscribe['node'] = subscription_name
            subscribe['jid'] = self.jid.full()
            ps.addChild(subscribe)
            iq.addChild(ps)
            iq.addCallback(subscribed, subscription_name, d)
            iq.send(to='pubsub.' + self.jid.host)
            return d
        return defer.fail(Exception('Unknown service'))

    def get_client(self, device, service):
        if self.xmldir is not None:
            client = None
        else:
            import importlib
            module_name = service.split(':')[-2]
            app = getattr(importlib.import_module(
                'upnpy_spyne.services.templates.' + module_name.lower()),
                module_name)
            if device['network'] == 'lan':
                client = Client(
                    device['services'][service]['controlURL'],
                    Application([app], app.tns,
                                in_protocol=Soap11(), out_protocol=Soap11()))
                client.set_options(
                    out_header={'Content-Type': ['text/xml;charset="utf-8"'],
                                'Soapaction': [app.tns]})
            else:
                url = (self.xmlstream, device['location'],)
                client = Client(
                    url,
                    Application([app], app.tns,
                                in_protocol=Soap11(xml_declaration=False),
                                out_protocol=Soap11(xml_declaration=False)),
                    cloud=True)
#                 print('**********%s' % service)
#                 print(device['services'][service])
        return client

    def call(self, device, service, func, params=()):
        if isinstance(device, dict):
            devname = device.keys()[0]
            dev = device[devname]
        else:
            devname = device
            dev = self.devices[device]
        if devname not in self._services:
            client = self.get_client(dev, service)
            self._services.update({devname: {service: client.service}})
        elif service not in self._services[devname]:
            client = self.get_client(dev, service)
            self._services[devname].update({service: client.service})
        try:
            f = getattr(
                self._services[devname][service], func)
        except AttributeError:
            self.log.error(
                'function %s not found for service %s' % (func, service))
            return defer.fail(Exception(
                'function %s not found for service %s' % (func, service)))
        try:
            if len(params) > 0:
                if isinstance(params, str):
                    d = f(params)
                else:
                    d = f(*params)
            else:
                d = f()
        except TypeError:
            #  boolean has no len
            d = f(params)
        d.addErrback(
            lambda failure, fname: self.log.error(
                '%s call failed : %s' % (fname, failure.getErrorMessage())),
            func)
        return d
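The LAN half of subscribe_classic above is ordinary UPnP GENA eventing: send SUBSCRIBE with CALLBACK/NT/TIMEOUT headers, remember the SID the device returns, and renew the lease before it expires. Below is a minimal, self-contained sketch of that handshake with twisted.web.client.Agent; the event URL and callback address are hypothetical placeholders, not values from the example.

from twisted.internet import reactor
from twisted.web.client import Agent
from twisted.web.http_headers import Headers


def gena_subscribe(agent, event_sub_url, callback_url):
    """Send a GENA SUBSCRIBE request; the Deferred fires with the SID."""
    headers = Headers({
        # CALLBACK tells the device where to POST NOTIFY events
        b'CALLBACK': [('<%s>' % callback_url).encode('ascii')],
        b'NT': [b'upnp:event'],
        # ask for a 25 second lease, to be renewed periodically
        b'TIMEOUT': [b'Second-25'],
    })
    d = agent.request(b'SUBSCRIBE', event_sub_url, headers)
    # a real client should handle a missing SID header here
    d.addCallback(lambda resp: resp.headers.getRawHeaders('sid')[0])
    return d


agent = Agent(reactor)
d = gena_subscribe(agent,
                   b'http://192.0.2.10:49152/event',   # device eventSubURL
                   'http://192.0.2.1:8000/callback')   # our NOTIFY endpoint
d.addCallback(lambda sid: print('subscribed, SID %s' % sid))
# reactor.run() is needed to actually drive the request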
Example n. 17
from twisted.logger import Logger

logger = Logger()

logger.warn("hendrix.resources is being deprecated.  Please see hendrix.facilities.resources.")

from hendrix.facilities.resources import *
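The module above is a deprecation shim: warn once at import time, then re-export everything from the new location. For comparison, here is a hedged sketch of the same idea using the standard library's warnings machinery (which honours -W filters) instead of a Twisted logger; this is an alternative, not hendrix's actual code.

import warnings

warnings.warn(
    "hendrix.resources is being deprecated.  Please see hendrix.facilities.resources.",
    DeprecationWarning,
    stacklevel=2,
)

from hendrix.facilities.resources import *  # noqa: E402,F401,F403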
Example n. 18
class Miner(NucypherTokenActor):
    """
    Ursula baseclass for blockchain operations, practically carrying a pickaxe.
    """

    __current_period_sample_rate = 10

    class MinerError(NucypherTokenActor.ActorError):
        pass

    def __init__(self, is_me: bool, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.log = Logger("miner")
        self.is_me = is_me

        if is_me:
            self.token_agent = NucypherTokenAgent(blockchain=self.blockchain)

            # Staking Loop
            self.__current_period = None
            self._abort_on_staking_error = True
            self._staking_task = task.LoopingCall(self._confirm_period)

        else:
            self.token_agent = constants.STRANGER_MINER

        # Everyone!
        self.miner_agent = MinerAgent(blockchain=self.blockchain)

    #
    # Staking
    #
    @only_me
    def stake(self,
              confirm_now=False,
              resume: bool = False,
              expiration: maya.MayaDT = None,
              lock_periods: int = None,
              *args, **kwargs) -> None:
        """High-level staking daemon loop"""

        if lock_periods and expiration:
            raise ValueError("Pass the number of lock periods or an expiration MayaDT; not both.")
        if expiration:
            lock_periods = datetime_to_period(expiration)

        if resume is False:
            _staking_receipts = self.initialize_stake(expiration=expiration,
                                                      lock_periods=lock_periods,
                                                      *args, **kwargs)

        # TODO: Check if this period has already been confirmed
        # TODO: Check if there is an active stake in the current period: Resume staking daemon
        # TODO: Validation and Sanity checks

        if confirm_now:
            self.confirm_activity()

        # record start time and periods
        self.__start_time = maya.now()
        self.__uptime_period = self.miner_agent.get_current_period()
        self.__terminal_period = self.__uptime_period + lock_periods
        self.__current_period = self.__uptime_period
        self.start_staking_loop()

    #
    # Daemon
    #

    @only_me
    def _confirm_period(self):

        period = self.miner_agent.get_current_period()
        self.log.info("Checking for new period. Current period is {}".format(self.__current_period))  # TODO:  set to debug?

        if self.__current_period != period:

            # check for stake expiration
            stake_expired = self.__current_period >= self.__terminal_period
            if stake_expired:
                self.log.info('Stake duration expired')
                return True

            self.confirm_activity()
            self.__current_period = period
            self.log.info("Confirmed activity for period {}".format(self.__current_period))

    @only_me
    def _crash_gracefully(self, failure=None):
        """
        A facility for crashing more gracefully in the event that an exception
        is unhandled in a different thread, especially inside a loop like the learning loop.
        """
        self._crashed = failure
        failure.raiseException()

    @only_me
    def handle_staking_errors(self, *args, **kwargs):
        failure = args[0]
        if self._abort_on_staking_error:
            self.log.critical("Unhandled error during node staking.  Attempting graceful crash.")
            reactor.callFromThread(self._crash_gracefully, failure=failure)
        else:
            self.log.warn("Unhandled error during node learning: {}".format(failure.getTraceback()))

    @only_me
    def start_staking_loop(self, now=True):
        if self._staking_task.running:
            return False
        else:
            d = self._staking_task.start(interval=self.__current_period_sample_rate, now=now)
            d.addErrback(self.handle_staking_errors)
            self.log.info("Started staking loop")
            return d

    @property
    def is_staking(self):
        """Checks if this Miner currently has locked tokens."""
        return bool(self.locked_tokens > 0)

    @property
    def locked_tokens(self):
        """Returns the amount of tokens this miner has locked."""
        return self.miner_agent.get_locked_tokens(miner_address=self.checksum_public_address)

    @property
    def stakes(self) -> Tuple[list]:
        """Read all live stake data from the blockchain and return it as a tuple"""
        stakes_reader = self.miner_agent.get_all_stakes(miner_address=self.checksum_public_address)
        return tuple(stakes_reader)

    @only_me
    def deposit(self, amount: int, lock_periods: int) -> Tuple[str, str]:
        """Public facing method for token locking."""

        approve_txhash = self.token_agent.approve_transfer(amount=amount,
                                                           target_address=self.miner_agent.contract_address,
                                                           sender_address=self.checksum_public_address)

        deposit_txhash = self.miner_agent.deposit_tokens(amount=amount,
                                                         lock_periods=lock_periods,
                                                         sender_address=self.checksum_public_address)

        return approve_txhash, deposit_txhash

    @only_me
    def divide_stake(self,
                     stake_index: int,
                     target_value: int,
                     additional_periods: int = None,
                     expiration: maya.MayaDT = None) -> dict:
        """
        Modifies the unlocking schedule and value of already locked tokens.

        This actor requires that is_me is True, and that the expiration datetime is after the existing
        locking schedule of this miner, or an exception will be raised.

        :param target_value:  The quantity of tokens in the smallest denomination.
        :param expiration: The new expiration date to set.
        :return: Returns the blockchain transaction hash

        """

        if additional_periods and expiration:
            raise ValueError("Pass the number of lock periods or an expiration MayaDT; not both.")

        _first_period, last_period, locked_value = self.miner_agent.get_stake_info(
            miner_address=self.checksum_public_address, stake_index=stake_index)
        if expiration:
            additional_periods = datetime_to_period(datetime=expiration) - last_period

            if additional_periods <= 0:
                raise self.MinerError("Expiration {} must be at least 1 period from now.".format(expiration))

        if target_value >= locked_value:
            raise self.MinerError("Cannot divide stake; Value must be less than the specified stake value.")

        # Ensure both halves are for valid amounts
        validate_stake_amount(amount=target_value)
        validate_stake_amount(amount=locked_value - target_value)

        tx = self.miner_agent.divide_stake(miner_address=self.checksum_public_address,
                                           stake_index=stake_index,
                                           target_value=target_value,
                                           periods=additional_periods)

        self.blockchain.wait_for_receipt(tx)
        return tx

    @only_me
    def __validate_stake(self, amount: int, lock_periods: int) -> bool:

        assert validate_stake_amount(amount=amount)  # TODO: remove assertions..?
        assert validate_locktime(lock_periods=lock_periods)

        if not self.token_balance >= amount:
            raise self.MinerError("Insufficient miner token balance ({balance})".format(balance=self.token_balance))
        else:
            return True

    @only_me
    def initialize_stake(self,
                         amount: int,
                         lock_periods: int = None,
                         expiration: maya.MayaDT = None,
                         entire_balance: bool = False) -> dict:
        """
        High level staking method for Miners.

        :param amount: Amount of tokens to stake denominated in the smallest unit.
        :param lock_periods: Duration of stake in periods.
        :param expiration: A MayaDT object representing the time the stake expires; used to calculate lock_periods.
        :param entire_balance: If True, stake the entire balance of this node, or the maximum possible.

        """

        if lock_periods and expiration:
            raise ValueError("Pass the number of lock periods or an expiration MayaDT; not both.")
        if entire_balance and amount:
            raise self.MinerError("Specify an amount or entire balance, not both")

        if expiration:
            lock_periods = calculate_period_duration(future_time=expiration)
        if entire_balance is True:
            amount = self.token_balance

        staking_transactions = OrderedDict()  # type: OrderedDict # time series of txhashes

        # Validate
        assert self.__validate_stake(amount=amount, lock_periods=lock_periods)

        # Transact
        approve_txhash, initial_deposit_txhash = self.deposit(amount=amount, lock_periods=lock_periods)
        self._transaction_cache.append((datetime.utcnow(), initial_deposit_txhash))

        self.log.info("{} Initialized new stake: {} tokens for {} periods".format(self.checksum_public_address, amount, lock_periods))
        return staking_transactions

    #
    # Reward and Collection
    #

    @only_me
    def confirm_activity(self) -> str:
        """Miner rewarded for every confirmed period"""

        txhash = self.miner_agent.confirm_activity(node_address=self.checksum_public_address)
        self._transaction_cache.append((datetime.utcnow(), txhash))

        return txhash

    @only_me
    def mint(self) -> str:
        """Computes and transfers tokens to the miner's account"""

        mint_txhash = self.miner_agent.mint(node_address=self.checksum_public_address)
        self._transaction_cache.append((datetime.utcnow(), mint_txhash))

        return mint_txhash

    @only_me
    def collect_policy_reward(self, policy_manager):
        """Collect rewarded ETH"""

        policy_reward_txhash = policy_manager.collect_policy_reward(collector_address=self.checksum_public_address)
        self._transaction_cache.append((datetime.utcnow(), policy_reward_txhash))

        return policy_reward_txhash

    @only_me
    def collect_staking_reward(self, collector_address: str) -> str:
        """Withdraw tokens rewarded for staking."""

        collection_txhash = self.miner_agent.collect_staking_reward(collector_address=collector_address)
        self._transaction_cache.append((datetime.utcnow(), collection_txhash))

        return collection_txhash
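Miner's staking daemon boils down to a stock Twisted pattern: a LoopingCall whose start() Deferred gets an errback that either crashes deliberately (re-raising in the main thread) or logs and carries on. A stripped-down, self-contained sketch of just that pattern follows; the task body and the ABORT_ON_ERROR switch are placeholders standing in for the Miner internals.

from twisted.internet import reactor, task
from twisted.logger import Logger

log = Logger()
ABORT_ON_ERROR = True  # mirrors _abort_on_staking_error


def _crash_gracefully(failure):
    # re-raise in the main thread so the traceback is not lost
    failure.raiseException()


def handle_errors(failure):
    if ABORT_ON_ERROR:
        log.critical("Unhandled error in periodic task.  Attempting graceful crash.")
        reactor.callFromThread(_crash_gracefully, failure)
    else:
        log.warn("Unhandled error in periodic task: {tb}", tb=failure.getTraceback())


def tick():
    log.info("checking for a new period")


loop = task.LoopingCall(tick)
d = loop.start(10, now=True)  # every 10 s; the Deferred errbacks if tick() raises
d.addErrback(handle_errors)
# reactor.run() keeps the loop alive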
Example n. 19
def delete_expired_data():
    logger.info("Expiring data")
    contacts.move_expired_data_to_deletion_list()
    function_to_run_in_thread = deferred_function(
        contacts.delete_from_deletion_list)
    deferred = deferToThread(function_to_run_in_thread)
    deferred.addCallback(delete_expired_data_success)
    deferred.addErrback(delete_expired_data_failure)
    return


if servers:
    l1 = task.LoopingCall(get_data_from_neighbors)
    l1.start(float(config.get('neighbor_sync_period', 600.0)))

l2 = task.LoopingCall(delete_expired_data)
l2.start(24 * 60 * 60)

site = twserver.Site(Simple())
port = int(config.get('port', 8080))
reactor.listenTCP(port, site)

# gack, we can't reset this... we will try at another time
# l = task.LoopingCall(reset_log_file)
# l.start(10, now = False)

# This is intentionally at warn level so that, while debugging, it is easy
# to wait for the server to be ready
logger.warn('Server alive and listening on port %s' % port)
reactor.run()
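The delete_expired_data job above shows the standard way to keep blocking work off the reactor thread: wrap it, hand it to deferToThread, and attach success/failure callbacks. A minimal, self-contained sketch of that pattern follows; the cleanup body and the callbacks are placeholders, and the example's own deferred_function wrapper is deliberately not reproduced here.

from twisted.internet import reactor
from twisted.internet.threads import deferToThread
from twisted.logger import Logger

log = Logger()


def blocking_cleanup():
    # stands in for a slow, blocking purge (database, filesystem, ...)
    return 42


def on_success(result):
    log.info("cleanup removed {n} items", n=result)


def on_failure(failure):
    log.error("cleanup failed: {msg}", msg=failure.getErrorMessage())


d = deferToThread(blocking_cleanup)
d.addCallbacks(on_success, on_failure)
# reactor.run() is needed for the thread pool to process the call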
Example n. 20
class CsvTsdb:
    """A minimal time-series store backed by a single CSV file.

    POST a line like 'some.label 47.5' to append a sample; GET returns
    roughly the last GET_BYTES bytes of the file.
    """
    GET_BYTES = 10 * 1024  # how many bytes to truncate to in GET requests

    app = Klein()

    def __init__(self, filename):
        self.log = Logger()
        self.filename = filename
        self.log.info("CsvTsdb on file {file}", file=self.filename)

    @app.route('/', methods=['POST'])
    def save(self, request):
        try:
            data = request.content.read().decode('utf-8').strip()
            try:
                label, value = data.rsplit(maxsplit=1)
                value = float(value)
            except ValueError as e:  # didn't parse as "label something 47" => assume it is just a label without number
                label, value = data, 1.0  # no value => value 1 (for ease of use)
            if '"' in label:
                raise ValueError('label may not contain a " (double quote)')
            if ',' in label:
                raise ValueError('label may not contain a , (comma)')
        except ValueError as e:
            raise BadRequest(e) from e

        ts = datetime.now(timezone.utc).replace(microsecond=0).isoformat()
        csv_line = '{}, "{}", {}\n'.format(ts, label, value)
        with open(self.filename, 'a') as f:
            f.write(csv_line)

        request.setResponseCode(204)
        return None

    @app.route('/', methods=['GET'])
    def get(self, request):
        # TODO this should eventually do something else -- some more advanced querying
        try:
            with open(self.filename, 'r') as f:
                f.seek(0, io.SEEK_END)
                end = f.tell()
                beg = max(0, end - self.GET_BYTES)
                f.seek(beg, io.SEEK_SET)
                f.readline()  # eat stuff until next newline
                # synchronous for now, since GET_BYTES (10 kB) is small
                return f.read()
        except FileNotFoundError:
            return b''

    @app.handle_errors(BadRequest)
    def handle_errors(self, request, failure):
        self.log.warn(str(failure))
        request.setResponseCode(failure.value.code)
        request.setHeader('Content-Type', 'text/plain')
        return failure.getErrorMessage()

    @property
    def resource(self):
        return self.app.resource()
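A hedged sketch of actually serving the resource above: Klein exposes an IResource, so it mounts on a plain Twisted Site. The port and file path are arbitrary choices, not part of the example.

from twisted.internet import reactor
from twisted.web.server import Site

tsdb = CsvTsdb('/tmp/metrics.csv')
reactor.listenTCP(8088, Site(tsdb.resource))
reactor.run()

Once running, something like curl -d 'fridge.temp 4.2' http://localhost:8088/ appends one sample, and a plain GET returns the recent tail of the CSV.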
Example n. 21
class StakeTracker:

    REFRESH_RATE = 60

    # NOTE: tracking_addresses is a class attribute, shared by every
    # StakeTracker instance in the process
    tracking_addresses = set()

    __stakes = dict()  # type: Dict[str, List[Stake]]
    __actions = list()  # type: List[Tuple[Callable, tuple]]

    def __init__(self,
                 checksum_addresses: List[str],
                 refresh_rate: int = None,
                 start_now: bool = False,
                 *args,
                 **kwargs):

        super().__init__(*args, **kwargs)

        self.log = Logger('stake-tracker')
        self.staking_agent = StakingEscrowAgent()

        self._refresh_rate = refresh_rate or self.REFRESH_RATE
        self._tracking_task = task.LoopingCall(self.__update)

        self.__current_period = None
        self.__stakes = dict()
        self.__start_time = NOT_STAKING
        self.__uptime_period = NOT_STAKING
        self.__terminal_period = NOT_STAKING
        self._abort_on_stake_tracking_error = True

        # "load-in":  Read on-chain stakes
        for checksum_address in checksum_addresses:
            if not is_checksum_address(checksum_address):
                raise ValueError(
                    f'{checksum_address} is not a valid EIP-55 checksum address'
                )
            self.tracking_addresses.add(checksum_address)

        if start_now:
            self.start()  # daemonize
        else:
            self.refresh(checksum_addresses=checksum_addresses)  # read-once

    @validate_checksum_address
    def __getitem__(self, checksum_address: str):
        stakes = self.stakes(checksum_address=checksum_address)
        return stakes

    def add_action(self, func: Callable, args=()) -> None:
        self.__actions.append((func, args))

    def clear_actions(self) -> None:
        self.__actions.clear()

    @property
    def current_period(self):
        return self.__current_period

    @validate_checksum_address
    def stakes(self, checksum_address: str) -> List[Stake]:
        """Return all cached stake instances from the blockchain."""
        try:
            return self.__stakes[checksum_address]
        except KeyError:
            return NO_STAKES.bool_value(False)
        except TypeError:
            if self.__stakes in (UNKNOWN_STAKES, NO_STAKES):
                return NO_STAKES.bool_value(False)
            raise

    @validate_checksum_address
    def refresh(self, checksum_addresses: List[str] = None) -> None:
        """Public staking cache invalidation method"""
        return self.__read_stakes(checksum_addresses=checksum_addresses)

    def stop(self) -> None:
        self._tracking_task.stop()
        self.log.info(f"STOPPED STAKE TRACKING")

    def start(self, force: bool = False) -> None:
        """
        High-level stake tracking initialization, this function aims
        to be safely called at any time - For example, it is okay to call
        this function multiple times within the same period.
        """
        if self._tracking_task.running and not force:
            return

        # Record the start time and period
        self.__start_time = maya.now()
        self.__uptime_period = self.staking_agent.get_current_period()
        self.__current_period = self.__uptime_period

        d = self._tracking_task.start(interval=self._refresh_rate)
        d.addErrback(self.handle_tracking_errors)
        self.log.info(
            f"STARTED STAKE TRACKING for {len(self.tracking_addresses)} addresses"
        )

    def _crash_gracefully(self, failure=None) -> None:
        """
        A facility for crashing more gracefully in the event that
        an exception is unhandled in a different thread.
        """
        self._crashed = failure
        failure.raiseException()

    def handle_tracking_errors(self, *args, **kwargs) -> None:
        failure = args[0]
        if self._abort_on_stake_tracking_error:
            self.log.critical(
                f"Unhandled error during node stake tracking. {failure}")
            reactor.callFromThread(self._crash_gracefully, failure=failure)
        else:
            self.log.warn(
                f"Unhandled error during stake tracking: {failure.getTraceback()}"
            )

    def __update(self) -> None:
        self.log.info(
            f"Checking for new period. Current period is {self.__current_period}"
        )
        onchain_period = self.staking_agent.get_current_period()  # <-- read from contract
        if self.__current_period != onchain_period:
            self.__current_period = onchain_period
            self.__read_stakes()
            for action, args in self.__actions:
                action(*args)

    @validate_checksum_address
    def __read_stakes(self, checksum_addresses: List[str] = None) -> None:
        """Rewrite the local staking cache by reading on-chain stakes"""

        if not checksum_addresses:
            checksum_addresses = self.tracking_addresses

        for checksum_address in checksum_addresses:

            if not is_checksum_address(checksum_address):
                if self._abort_on_stake_tracking_error:
                    raise ValueError(
                        f'{checksum_address} is not a valid EIP-55 checksum address'
                    )
                self.tracking_addresses.remove(checksum_address)  # Prune

            existing_records = len(
                self.stakes(checksum_address=checksum_address))

            # Candidate replacement cache values
            onchain_stakes, terminal_period = list(), 0

            # Read from blockchain
            stakes_reader = self.staking_agent.get_all_stakes(
                staker_address=checksum_address)
            for onchain_index, stake_info in enumerate(stakes_reader):

                if not stake_info:
                    onchain_stake = EMPTY_STAKING_SLOT

                else:
                    onchain_stake = Stake.from_stake_info(
                        checksum_address=checksum_address,
                        stake_info=stake_info,
                        index=onchain_index)

                    # track the latest terminal period
                    if onchain_stake.end_period > terminal_period:
                        terminal_period = onchain_stake.end_period

                # Store the replacement stake
                onchain_stakes.append(onchain_stake)

            # Commit the new stake and terminal values to the cache
            if not onchain_stakes:
                self.__stakes[checksum_address] = NO_STAKES.bool_value(False)
            else:
                self.__terminal_period = terminal_period
                self.__stakes[checksum_address] = onchain_stakes
                new_records = len(
                    self.__stakes[checksum_address]) - existing_records
                self.log.debug(
                    f"Updated local staking cache ({new_records} new stakes).")

            # Record most recent cache update
            self.__updated = maya.now()
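A hedged usage sketch for StakeTracker follows. The address below is only a syntactically valid placeholder (the all-zero address passes the EIP-55 check because it contains no letters), and StakingEscrowAgent still needs a configured blockchain connection before any call will succeed.

from twisted.internet import reactor

STAKER = '0x0000000000000000000000000000000000000000'  # placeholder address

tracker = StakeTracker(checksum_addresses=[STAKER])
tracker.add_action(lambda: print('period rolled over'))
tracker.start()  # polls get_current_period() every REFRESH_RATE seconds
reactor.run()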
Example n. 22
class NodeConfiguration:

    _name = 'ursula'
    _character_class = Ursula

    DEFAULT_CONFIG_FILE_LOCATION = os.path.join(DEFAULT_CONFIG_ROOT,
                                                '{}.config'.format(_name))
    DEFAULT_OPERATING_MODE = 'decentralized'
    NODE_SERIALIZER = binascii.hexlify
    NODE_DESERIALIZER = binascii.unhexlify

    __CONFIG_FILE_EXT = '.config'
    __CONFIG_FILE_DESERIALIZER = json.loads
    __TEMP_CONFIGURATION_DIR_PREFIX = "nucypher-tmp-"
    __DEFAULT_NETWORK_MIDDLEWARE_CLASS = RestMiddleware
    __DEFAULT_NODE_STORAGE = LocalFileBasedNodeStorage

    __REGISTRY_NAME = 'contract_registry.json'
    REGISTRY_SOURCE = os.path.join(
        BASE_DIR, __REGISTRY_NAME)  # TODO: #461 Where will this be hosted?

    class ConfigurationError(RuntimeError):
        pass

    class InvalidConfiguration(ConfigurationError):
        pass

    def __init__(
            self,
            temp: bool = False,
            config_root: str = DEFAULT_CONFIG_ROOT,
            passphrase: str = None,
            auto_initialize: bool = False,
            auto_generate_keys: bool = False,
            config_file_location: str = DEFAULT_CONFIG_FILE_LOCATION,
            keyring_dir: str = None,
            checksum_address: str = None,
            is_me: bool = True,
            federated_only: bool = False,
            network_middleware: RestMiddleware = None,
            registry_source: str = REGISTRY_SOURCE,
            registry_filepath: str = None,
            import_seed_registry: bool = False,

            # Learner
            learn_on_same_thread: bool = False,
            abort_on_learning_error: bool = False,
            start_learning_now: bool = True,

            # TLS
            known_certificates_dir: str = None,

            # Metadata
            known_nodes: set = None,
            node_storage: NodeStorage = None,
            load_metadata: bool = True,
            save_metadata: bool = True) -> None:

        # Logs
        self.log = Logger(self.__class__.__name__)

        # Known Nodes
        self.known_nodes_dir = UNINITIALIZED_CONFIGURATION
        self.known_certificates_dir = known_certificates_dir or UNINITIALIZED_CONFIGURATION

        # Keyring
        self.keyring = UNINITIALIZED_CONFIGURATION
        self.keyring_dir = keyring_dir or UNINITIALIZED_CONFIGURATION

        # Contract Registry
        self.__registry_source = registry_source
        self.registry_filepath = registry_filepath or UNINITIALIZED_CONFIGURATION

        # Configuration Root Directory
        self.config_root = UNINITIALIZED_CONFIGURATION
        self.__temp = temp
        if self.__temp:
            self.__temp_dir = UNINITIALIZED_CONFIGURATION
            self.node_storage = InMemoryNodeStorage(
                federated_only=federated_only, character_class=self.__class__)
        else:
            self.config_root = config_root
            self.__temp_dir = LIVE_CONFIGURATION
            from nucypher.characters.lawful import Ursula  # TODO : Needs cleanup
            self.node_storage = node_storage or self.__DEFAULT_NODE_STORAGE(
                federated_only=federated_only, character_class=Ursula)
            self.__cache_runtime_filepaths()
        self.config_file_location = config_file_location

        #
        # Identity
        #
        self.federated_only = federated_only
        self.checksum_address = checksum_address
        self.is_me = is_me
        if self.is_me:
            #
            # Self
            #
            if checksum_address and not self.__temp:
                self.read_keyring()
            self.network_middleware = network_middleware or self.__DEFAULT_NETWORK_MIDDLEWARE_CLASS(
            )
        else:
            #
            # Stranger
            #
            self.known_nodes_dir = STRANGER_CONFIGURATION
            self.known_certificates_dir = STRANGER_CONFIGURATION
            self.node_storage = STRANGER_CONFIGURATION
            self.keyring_dir = STRANGER_CONFIGURATION
            self.keyring = STRANGER_CONFIGURATION
            self.network_middleware = STRANGER_CONFIGURATION
            if network_middleware:
                raise self.ConfigurationError(
                    "Cannot configure a stranger to use network middleware")

        #
        # Learner
        #
        self.known_nodes = known_nodes or set()
        self.learn_on_same_thread = learn_on_same_thread
        self.abort_on_learning_error = abort_on_learning_error
        self.start_learning_now = start_learning_now
        self.save_metadata = save_metadata
        self.load_metadata = load_metadata

        #
        # Auto-Initialization
        #
        if auto_initialize:
            self.initialize(no_registry=not import_seed_registry
                            or federated_only,
                            wallet=auto_generate_keys and not federated_only,
                            encrypting=auto_generate_keys,
                            passphrase=passphrase)

    def __call__(self, *args, **kwargs):
        return self.produce(*args, **kwargs)

    def cleanup(self) -> None:
        if self.__temp:
            self.__temp_dir.cleanup()

    @property
    def temp(self):
        return self.__temp

    def produce(self, passphrase: str = None, **overrides):
        """Initialize a new character instance and return it"""
        if not self.temp:
            self.read_keyring()
            self.keyring.unlock(passphrase=passphrase)
        merged_parameters = {
            **self.static_payload,
            **self.dynamic_payload,
            **overrides
        }
        return self._character_class(**merged_parameters)

    @staticmethod
    def _read_configuration_file(filepath) -> dict:
        with open(filepath, 'r') as file:
            payload = NodeConfiguration.__CONFIG_FILE_DESERIALIZER(file.read())
        return payload

    @classmethod
    def from_configuration_file(cls, filepath,
                                **overrides) -> 'NodeConfiguration':
        """Initialize a NodeConfiguration from a JSON file."""
        from nucypher.config.storages import NodeStorage  # TODO: move
        NODE_STORAGES = {
            storage_class._name: storage_class
            for storage_class in NodeStorage.__subclasses__()
        }

        payload = cls._read_configuration_file(filepath=filepath)

        # Make NodeStorage
        storage_payload = payload['node_storage']
        storage_type = storage_payload[NodeStorage._TYPE_LABEL]
        storage_class = NODE_STORAGES[storage_type]
        node_storage = storage_class.from_payload(
            payload=storage_payload,
            character_class=cls._character_class,
            federated_only=payload['federated_only'],
            serializer=cls.NODE_SERIALIZER,
            deserializer=cls.NODE_DESERIALIZER)

        payload.update(dict(node_storage=node_storage))
        return cls(is_me=True, **{**payload, **overrides})

    def to_configuration_file(self, filepath: str = None) -> str:
        """Write the static_payload to a JSON file."""
        if filepath is None:
            filename = '{}{}'.format(self._name.lower(),
                                     self.__CONFIG_FILE_EXT)
            filepath = os.path.join(self.config_root, filename)

        payload = self.static_payload
        del payload['is_me']  # TODO
        # Save node connection data
        payload.update(dict(node_storage=self.node_storage.payload()))

        with open(filepath, 'w') as config_file:
            config_file.write(json.dumps(payload, indent=4))
        return filepath

    def validate(self, config_root: str, no_registry=False) -> bool:
        # Top-level
        if not os.path.exists(config_root):
            raise self.ConfigurationError(
                'No configuration directory found at {}.'.format(config_root))

        # Sub-paths
        filepaths = self.runtime_filepaths
        if no_registry:
            del filepaths['registry_filepath']

        for field, path in filepaths.items():
            if not os.path.exists(path):
                message = 'Missing configuration file or directory {}.'
                raise NodeConfiguration.InvalidConfiguration(
                    message.format(path))
        return True

    @property
    def static_payload(self) -> dict:
        """Exported static configuration values for initializing Ursula"""
        payload = dict(
            # Identity
            is_me=self.is_me,
            federated_only=self.federated_only,  # TODO: 466
            checksum_address=self.checksum_address,
            keyring_dir=self.keyring_dir,
            known_certificates_dir=self.known_certificates_dir,

            # Behavior
            learn_on_same_thread=self.learn_on_same_thread,
            abort_on_learning_error=self.abort_on_learning_error,
            start_learning_now=self.start_learning_now,
            save_metadata=self.save_metadata)
        return payload

    @property
    def dynamic_payload(self, **overrides) -> dict:
        """Exported dynamic configuration values for initializing Ursula"""
        # NOTE: accessed as a property, so the **overrides branch below is
        # only reachable when the underlying function is called directly
        if self.load_metadata:
            self.known_nodes.update(
                self.node_storage.all(federated_only=self.federated_only))
        payload = dict(network_middleware=self.network_middleware
                       or self.__DEFAULT_NETWORK_MIDDLEWARE_CLASS(),
                       known_nodes=self.known_nodes,
                       node_storage=self.node_storage,
                       crypto_power_ups=self.derive_node_power_ups() or None)
        if overrides:
            self.log.debug(
                "Overrides supplied to dynamic payload for {}".format(
                    self.__class__.__name__))
            payload.update(overrides)
        return payload

    @property
    def runtime_filepaths(self):
        filepaths = dict(config_root=self.config_root,
                         keyring_dir=self.keyring_dir,
                         known_certificates_dir=self.known_certificates_dir,
                         registry_filepath=self.registry_filepath)
        return filepaths

    @staticmethod
    def generate_runtime_filepaths(config_root: str) -> dict:
        """Dynamically generate paths based on configuration root directory"""
        known_nodes_dir = os.path.join(config_root, 'known_nodes')
        filepaths = dict(
            config_root=config_root,
            keyring_dir=os.path.join(config_root, 'keyring'),
            known_nodes_dir=known_nodes_dir,
            known_certificates_dir=os.path.join(known_nodes_dir,
                                                'certificates'),
            registry_filepath=os.path.join(config_root,
                                           NodeConfiguration.__REGISTRY_NAME))
        return filepaths

    def __cache_runtime_filepaths(self) -> None:
        """Generate runtime filepaths and cache them on the config object"""
        filepaths = self.generate_runtime_filepaths(
            config_root=self.config_root)
        for field, filepath in filepaths.items():
            if getattr(self, field) is UNINITIALIZED_CONFIGURATION:
                setattr(self, field, filepath)

    def derive_node_power_ups(self) -> List[CryptoPowerUp]:
        power_ups = list()
        if self.is_me and not self.temp:
            for power_class in self._character_class._default_crypto_powerups:
                power_up = self.keyring.derive_crypto_power(power_class)
                power_ups.append(power_up)
        return power_ups

    def initialize(self,
                   passphrase: str,
                   no_registry: bool = False,
                   wallet: bool = False,
                   encrypting: bool = False,
                   tls: bool = False,
                   host: str = None,
                   curve=None,
                   no_keys: bool = False) -> str:
        """Write a new configuration to the disk, and with the configured node store."""

        #
        # Create Config Root
        #
        if self.__temp:
            self.__temp_dir = TemporaryDirectory(
                prefix=self.__TEMP_CONFIGURATION_DIR_PREFIX)
            self.config_root = self.__temp_dir.name
        else:
            try:
                os.mkdir(self.config_root, mode=0o755)
            except FileExistsError:
                message = "There are existing configuration files at {}".format(
                    self.config_root)
                raise self.ConfigurationError(message)
            except FileNotFoundError:
                message = "Cannot write configuration files because the directory {} does not exist.".format(
                    self.config_root)
                raise self.ConfigurationError(message)

        #
        # Create Config Subdirectories
        #
        self.__cache_runtime_filepaths()
        try:

            # Directories
            os.mkdir(self.keyring_dir, mode=0o700)  # keyring
            os.mkdir(self.known_nodes_dir, mode=0o755)  # known_nodes
            os.mkdir(self.known_certificates_dir, mode=0o755)  # known_certs
            self.node_storage.initialize()  # TODO: default known dir

            if not self.temp and not no_keys:
                # Keyring
                self.write_keyring(passphrase=passphrase,
                                   wallet=wallet,
                                   encrypting=encrypting,
                                   tls=tls,
                                   host=host,
                                   tls_curve=curve)

            # Registry
            if not no_registry and not self.federated_only:
                self.write_registry(output_filepath=self.registry_filepath,
                                    source=self.__registry_source,
                                    blank=no_registry)

        except FileExistsError:
            existing_paths = [
                os.path.join(self.config_root, f)
                for f in os.listdir(self.config_root)
            ]
            message = "There are pre-existing nucypher installation files at {}: {}".format(
                self.config_root, existing_paths)
            self.log.critical(message)
            raise NodeConfiguration.ConfigurationError(message)

        if not self.__temp:
            self.validate(config_root=self.config_root,
                          no_registry=no_registry or self.federated_only)
        return self.config_root

    def read_known_nodes(self):
        self.known_nodes.update(
            self.node_storage.all(federated_only=self.federated_only))
        return self.known_nodes

    def read_keyring(self, *args, **kwargs):
        if self.checksum_address is None:
            raise self.ConfigurationError(
                "No account specified to unlock keyring")
        self.keyring = NucypherKeyring(keyring_root=self.keyring_dir,
                                       account=self.checksum_address,
                                       *args,
                                       **kwargs)

    def write_keyring(
        self,
        passphrase: str,
        encrypting: bool,
        wallet: bool,
        tls: bool,
        host: str,
        tls_curve: EllipticCurve = None,
    ) -> NucypherKeyring:

        self.keyring = NucypherKeyring.generate(passphrase=passphrase,
                                                encrypting=encrypting,
                                                wallet=wallet,
                                                tls=tls,
                                                host=host,
                                                curve=tls_curve,
                                                keyring_root=self.keyring_dir)

        # TODO: Operating mode switch #466
        if self.federated_only or not wallet:
            self.checksum_address = self.keyring.federated_address
        else:
            self.checksum_address = self.keyring.checksum_address
        if tls:
            self.certificate_filepath = self.keyring.certificate_filepath

        return self.keyring

    def write_registry(self,
                       output_filepath: str = None,
                       source: str = None,
                       force: bool = False,
                       blank=False) -> str:

        output_filepath = output_filepath or self.registry_filepath
        source = source or self.REGISTRY_SOURCE

        if not force and os.path.isfile(output_filepath):
            raise self.ConfigurationError(
                'There is an existing file at the registry output_filepath {}'.
                format(output_filepath))

        if not blank and not self.temp:
            # Validate Registry
            with open(source, 'r') as registry_file:
                registry_data = registry_file.read()
            try:
                json.loads(registry_data)
            except JSONDecodeError:
                message = "The registry source {} is not valid JSON".format(
                    source)
                self.log.critical(message)
                raise self.ConfigurationError(message)
            else:
                self.log.debug(
                    "Source registry {} is valid JSON".format(source))

            # Copy the validated source registry to the output filepath
            with open(output_filepath, 'w') as registry_output_file:
                registry_output_file.write(registry_data)

        else:
            self.log.warn("Writing blank registry")
            open(output_filepath, 'w').close()  # write blank

        self.log.info(
            "Successfully wrote registry to {}".format(output_filepath))
        return output_filepath
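
A minimal, self-contained sketch of the serialization technique used by to_configuration_file and from_configuration_file: only JSON-serializable static values go to disk, and live objects (keyring, node storage) are rebuilt on load. The payload keys and paths below are illustrative, not the real NodeConfiguration schema.

import json
import os
import tempfile

# Illustrative static payload; the real schema is defined by static_payload above.
static_payload = {
    'federated_only': True,
    'checksum_address': None,
    'start_learning_now': True,
}

config_root = tempfile.mkdtemp(prefix='tmp-nucypher-sketch')
filepath = os.path.join(config_root, 'node.config')

with open(filepath, 'w') as config_file:
    config_file.write(json.dumps(static_payload, indent=4))  # cf. to_configuration_file

with open(filepath, 'r') as config_file:
    restored = json.loads(config_file.read())  # cf. from_configuration_file

assert restored == static_payload
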
Example no. 23
0
class RetrievalClient:
    """
    Capsule frag retrieval machinery shared between Bob and Porter.
    """
    def __init__(self, learner: Learner):
        self._learner = learner
        self.log = Logger(self.__class__.__name__)

    def _ensure_ursula_availability(self,
                                    treasure_map: TreasureMap,
                                    timeout=10):
        """
        Make sure we know enough nodes from the treasure map to decrypt;
        otherwise block and wait for them to come online.
        """

        # OK, so we're going to need to do some network activity for this retrieval.
        # Let's make sure we've seeded.
        if not self._learner.done_seeding:
            self._learner.learn_from_teacher_node()

        ursulas_in_map = treasure_map.destinations.keys()

        # TODO (#1995): when that issue is fixed, conversion is no longer needed
        # Use a set so the difference/intersection operations below work
        ursulas_in_map = {
            to_checksum_address(address) for address in ursulas_in_map
        }

        all_known_ursulas = self._learner.known_nodes.addresses()

        # Push all unknown Ursulas from the map in the queue for learning
        unknown_ursulas = ursulas_in_map - all_known_ursulas

        # If we know enough to decrypt, we can proceed.
        known_ursulas = ursulas_in_map & all_known_ursulas
        if len(known_ursulas) >= treasure_map.threshold:
            return

        # | <--- shares                                            ---> |
        # | <--- threshold               ---> | <--- allow_missing ---> |
        # | <--- known_ursulas ---> | <--- unknown_ursulas         ---> |
        allow_missing = len(treasure_map.destinations) - treasure_map.threshold
        self._learner.block_until_specific_nodes_are_known(
            unknown_ursulas,
            timeout=timeout,
            allow_missing=allow_missing,
            learn_on_this_thread=True)

    def _request_reencryption(
        self,
        ursula: 'Ursula',
        reencryption_request: ReencryptionRequest,
        alice_verifying_key: PublicKey,
        policy_encrypting_key: PublicKey,
        bob_encrypting_key: PublicKey,
    ) -> Dict['Capsule', 'VerifiedCapsuleFrag']:
        """
        Sends a reencryption request to a single Ursula and processes the results.

        Returns reencrypted capsule frags matched to corresponding capsules.
        """

        middleware = self._learner.network_middleware

        try:
            response = middleware.reencrypt(ursula,
                                            bytes(reencryption_request))
        except NodeSeemsToBeDown as e:
            # TODO: What to do here?  Ursula isn't supposed to be down.  NRN
            message = (
                f"Ursula ({ursula}) seems to be down "
                f"while trying to complete ReencryptionRequest: {reencryption_request}"
            )
            self.log.info(message)
            raise RuntimeError(message) from e
        except middleware.NotFound as e:
            # This Ursula claims not to have a matching KFrag.  Maybe this has been revoked?
            # TODO: What's the thing to do here?
            # Do we want to track these Ursulas in some way in case they're lying?  #567
            message = (
                f"Ursula ({ursula}) claims not to know of the policy {reencryption_request.hrac}. "
                f"Has access been revoked?")
            self.log.warn(message)
            raise RuntimeError(message) from e
        except middleware.UnexpectedResponse:
            raise  # TODO: Handle this

        try:
            reencryption_response = ReencryptionResponse.from_bytes(
                response.content)
        except Exception as e:
            message = f"Ursula ({ursula}) returned an invalid response: {e}."
            self.log.warn(message)
            raise RuntimeError(message) from e

        ursula_verifying_key = ursula.stamp.as_umbral_pubkey()

        try:
            verified_cfrags = reencryption_response.verify(
                capsules=reencryption_request.capsules,
                alice_verifying_key=alice_verifying_key,
                ursula_verifying_key=ursula_verifying_key,
                policy_encrypting_key=policy_encrypting_key,
                bob_encrypting_key=bob_encrypting_key,
            )
        except InvalidSignature as e:
            self.log.warn(str(e))
            raise
        except VerificationError:
            # In future we may want to remember this Ursula and do something about it
            self.log.warn(
                "Failed to verify capsule frags in the ReencryptionResponse")
            raise
        except Exception as e:
            message = f"Failed to verify the ReencryptionResponse: {e}"
            self.log.warn(message)
            raise RuntimeError(message) from e

        return {
            capsule: vcfrag
            for capsule, vcfrag in zip(reencryption_request.capsules,
                                       verified_cfrags)
        }

    def retrieve_cfrags(
        self,
        treasure_map: TreasureMap,
        retrieval_kits: Sequence[RetrievalKit],
        alice_verifying_key: PublicKey,  # KeyFrag signer's key
        bob_encrypting_key: PublicKey,  # User's public key (reencryption target)
        bob_verifying_key: PublicKey,
    ) -> List[RetrievalResult]:

        self._ensure_ursula_availability(treasure_map)

        retrieval_plan = RetrievalPlan(treasure_map=treasure_map,
                                       retrieval_kits=retrieval_kits)

        while not retrieval_plan.is_complete():
            # TODO (#2789): Currently we'll only query one Ursula once during the retrieval.
            # Alternatively we may re-query Ursulas that were offline until the timeout expires.

            work_order = retrieval_plan.get_work_order()

            # TODO (#1995): when that issue is fixed, conversion is no longer needed
            ursula_checksum_address = to_checksum_address(
                work_order.ursula_address)

            if ursula_checksum_address not in self._learner.known_nodes:
                continue

            ursula = self._learner.known_nodes[ursula_checksum_address]
            reencryption_request = ReencryptionRequest(
                hrac=treasure_map.hrac,
                capsules=work_order.capsules,
                encrypted_kfrag=treasure_map.destinations[
                    work_order.ursula_address],
                bob_verifying_key=bob_verifying_key,
                publisher_verifying_key=treasure_map.publisher_verifying_key)

            try:
                cfrags = self._request_reencryption(
                    ursula=ursula,
                    reencryption_request=reencryption_request,
                    alice_verifying_key=alice_verifying_key,
                    policy_encrypting_key=treasure_map.policy_encrypting_key,
                    bob_encrypting_key=bob_encrypting_key)
            except Exception as e:
                # TODO (#2789): at this point we can separate the exceptions to "acceptable"
                # (Ursula is not reachable) and "unacceptable" (Ursula provided bad results).
                self.log.warn(f"Ursula {ursula} failed to reencrypt: {e}")
                continue

            retrieval_plan.update(work_order, cfrags)

        return retrieval_plan.results()
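
A minimal, self-contained sketch of the fault-tolerant retrieval loop above: keep issuing work orders until the threshold is met, skipping nodes that fail. The responder table, addresses, and the 2-of-3 threshold are illustrative stand-ins for Ursulas and _request_reencryption.

def retrieve(responders, threshold):
    results = {}
    for address, respond in responders.items():
        if len(results) >= threshold:
            break  # enough verified cfrags; stop querying
        try:
            results[address] = respond()  # stands in for _request_reencryption()
        except RuntimeError:
            continue  # unreachable or misbehaving node; try the next one
    if len(results) < threshold:
        raise RuntimeError('Not enough responders met the threshold')
    return results

def down():
    raise RuntimeError('node seems to be down')

responders = {'0xUrsula1': lambda: 'cfrag-1', '0xUrsula2': down, '0xUrsula3': lambda: 'cfrag-3'}
assert retrieve(responders, threshold=2) == {'0xUrsula1': 'cfrag-1', '0xUrsula3': 'cfrag-3'}
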
Example no. 24
0
class OurStreamProtocol(Protocol):
    """Protocol implementing ShinySDR's WebSocket service.
    
    This protocol's transport should be a txWS WebSocket transport.
    """
    def __init__(self, caps, subscription_context):
        self.__log = Logger()
        self.__subscription_context = subscription_context
        self._caps = caps
        self._seenValues = {}
        self.inner = None

    def dataReceived(self, data):
        """Twisted Protocol implementation.
        
        Additionally, txWS takes no care with exceptions here, so we catch and log."""
        # pylint: disable=broad-except
        try:
            if self.inner is None:
                # To work around txWS's lack of a notification when the URL is available, all clients send a dummy first message.
                self.__dispatch_url()
            else:
                self.inner.dataReceived(data)
        except Exception:
            self.__log.failure('Error processing incoming WebSocket message')

    def __dispatch_url(self):
        loc = self.transport.location
        self.__log.info('Stream connection to {url}', url=loc)
        path = [urllib.unquote(x) for x in loc.split('/')]
        assert path[0] == ''
        path[0:1] = []
        cap_string = path[0].decode('utf-8')  # TODO centralize url decoding
        if cap_string in self._caps:
            root_object = self._caps[cap_string]
            path[0:1] = []
        else:
            raise Exception('Unknown cap')  # TODO better error reporting
        if len(path) == 1 and path[0].startswith(b'audio?rate='):
            rate = int(
                json.loads(urllib.unquote(path[0][len(b'audio?rate='):])))
            self.inner = AudioStreamInner(the_reactor, self.__send,
                                          root_object, rate)
        elif len(path) >= 1 and path[0] == CAP_OBJECT_PATH_ELEMENT:
            # note _lookup_block may throw. TODO: Better error reporting
            root_object = _lookup_block(root_object, path[1:])
            self.inner = StateStreamInner(
                self.__send, root_object, loc, self.__subscription_context
            )  # note reuse of loc as HTTP path; probably will regret this
        else:
            raise Exception('Unknown path: %r' % (path, ))

    def connectionMade(self):
        """twisted Protocol implementation"""
        self.transport.setBinaryMode(True)
        # Unfortunately, txWS calls this too soon for transport.location to be available

    def connectionLost(self, reason):
        # pylint: disable=signature-differs
        """twisted Protocol implementation"""
        if self.inner is not None:
            self.inner.connectionLost(reason)

    def __send(self, message, safe_to_drop=False):
        if len(self.transport.transport.dataBuffer) > 1000000:
            # TODO: condition is horrible implementation-diving kludge
            # Don't accumulate indefinite buffer if we aren't successfully getting it onto the network.

            # TODO: There are no tests of this mechanism

            if safe_to_drop:
                self.__log.warn('Dropping data going to stream {url}',
                                url=self.transport.location)
            else:
                self.__log.error(
                    'Dropping connection due to too much data on stream {url}',
                    url=self.transport.location)
                self.transport.close(reason='Too much data buffered')
        else:
            self.transport.write(message)
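
A minimal, self-contained sketch of the "dummy first message" dispatch pattern used by OurStreamProtocol: the first frame only selects an inner handler for the stream's URL, and every later frame is forwarded to it. All names here are illustrative.

class EchoInner:
    def __init__(self):
        self.received = []

    def dataReceived(self, data):
        self.received.append(data)

class Dispatcher:
    def __init__(self, handlers, location):
        self._handlers = handlers  # maps a URL path to a handler factory
        self._location = location
        self.inner = None

    def dataReceived(self, data):
        if self.inner is None:
            # First frame: dispatch on the URL; the frame's content is ignored
            self.inner = self._handlers[self._location]()
        else:
            self.inner.dataReceived(data)

d = Dispatcher({'/audio': EchoInner}, location='/audio')
d.dataReceived(b'')         # dummy first message triggers dispatch only
d.dataReceived(b'frame-1')  # subsequent frames reach the inner handler
assert d.inner.received == [b'frame-1']
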
Example no. 25
0
class Miner(NucypherTokenActor):
    """
    Ursula baseclass for blockchain operations, practically carrying a pickaxe.
    """

    __current_period_sample_rate = 60 * 60  # seconds

    class MinerError(NucypherTokenActor.ActorError):
        pass

    def __init__(self,
                 is_me: bool,
                 start_staking_loop: bool = True,
                 economics: TokenEconomics = None,
                 *args,
                 **kwargs) -> None:
        super().__init__(*args, **kwargs)

        self.log = Logger("miner")
        self.is_me = is_me

        if not economics:
            economics = TokenEconomics()
        self.economics = economics

        #
        # Blockchain
        #

        if is_me:
            self.token_agent = NucypherTokenAgent(blockchain=self.blockchain)

            # Staking Loop
            self.__current_period = None
            self._abort_on_staking_error = True
            self._staking_task = task.LoopingCall(self.heartbeat)

        else:
            self.token_agent = STRANGER_MINER

        self.miner_agent = MinerAgent(blockchain=self.blockchain)

        #
        # Stakes
        #

        self.__stakes = UNKNOWN_STAKES
        self.__start_time = NOT_STAKING
        self.__uptime_period = NOT_STAKING
        self.__terminal_period = UNKNOWN_STAKES

        self.__read_stakes()  # "load-in":  Read on-chain stakes

        # Start the callbacks if there are active stakes
        if (self.stakes is not NO_STAKES) and start_staking_loop:
            self.stake()

    #
    # Staking
    #

    @only_me
    def stake(self, confirm_now: bool = True) -> None:
        """
        High-level staking looping call initialization, this function aims
        to be safely called at any time - For example, it is okay to call
        this function multiple times within the same period.
        """
        # Get the last stake end period of all stakes
        terminal_period = max(stake.end_period for stake in self.stakes)

        if confirm_now:
            self.confirm_activity()

        # record start time and periods
        self.__start_time = maya.now()
        self.__uptime_period = self.miner_agent.get_current_period()
        self.__terminal_period = terminal_period
        self.__current_period = self.__uptime_period
        self.start_staking_loop()

    @property
    def last_active_period(self) -> int:
        period = self.miner_agent.get_last_active_period(
            address=self.checksum_address)
        return period

    @only_me
    def _confirm_period(self):

        # Read the current period from the contract
        onchain_period = self.miner_agent.get_current_period()
        self.log.info("Checking for new period. Current period is {}".format(
            self.__current_period))

        # Check if the period has changed on-chain
        if self.__current_period != onchain_period:

            # Let's see how much time has passed
            # TODO: Follow-up actions for downtime
            missed_periods = onchain_period - self.last_active_period
            if missed_periods:
                self.log.warn(
                    f"MISSED CONFIRMATION - {missed_periods} missed staking confirmations detected!"
                )
                self.__read_stakes()  # Invalidate the stake cache

            # Check for stake expiration and exit
            stake_expired = self.__current_period >= self.__terminal_period
            if stake_expired:
                self.log.info('STOPPED STAKING - Final stake ended.')
                return True

            # Write to Blockchain
            self.confirm_activity()

            # Update local period cache
            self.__current_period = onchain_period
            self.log.info("Confirmed activity for period {}".format(
                self.__current_period))

    def heartbeat(self):
        """Used with LoopingCall; errors propagate to the LoopingCall errback."""
        self._confirm_period()

    def _crash_gracefully(self, failure=None):
        """
        A facility for crashing more gracefully in the event that an exception is unhandled in a different thread.
        """
        self._crashed = failure
        failure.raiseException()

    def handle_staking_errors(self, *args, **kwargs):
        failure = args[0]
        if self._abort_on_staking_error:
            self.log.critical(
                "Unhandled error during node staking.  Attempting graceful crash."
            )
            reactor.callFromThread(self._crash_gracefully, failure=failure)
        else:
            self.log.warn("Unhandled error during node learning: {}".format(
                failure.getTraceback()))

    @only_me
    def start_staking_loop(self, now=True) -> None:
        if self._staking_task.running:
            return
        d = self._staking_task.start(
            interval=self.__current_period_sample_rate, now=now)
        d.addErrback(self.handle_staking_errors)
        self.log.info(
            f"STARTED STAKING - Scheduled end period is currently {self.__terminal_period}"
        )

    @property
    def is_staking(self) -> bool:
        """Checks if this Miner currently has active stakes / locked tokens."""
        return bool(self.stakes)

    def locked_tokens(self, periods: int = 0) -> NU:
        """Returns the amount of tokens this miner has locked for a given duration in periods."""
        raw_value = self.miner_agent.get_locked_tokens(
            miner_address=self.checksum_address, periods=periods)
        value = NU.from_nunits(raw_value)
        return value

    @property
    def current_stake(self) -> NU:
        """
        The total number of staked tokens, either locked or unlocked in the current period.
        """

        if self.stakes:
            return NU(sum(int(stake.value) for stake in self.stakes), 'NuNit')
        else:
            return NU.ZERO()

    @only_me
    def divide_stake(self,
                     stake_index: int,
                     target_value: NU,
                     additional_periods: int = None,
                     expiration: maya.MayaDT = None) -> tuple:

        # Calculate duration in periods
        if additional_periods and expiration:
            raise ValueError(
                "Pass the number of lock periods or an expiration MayaDT; not both."
            )

        # Select stake to divide from local cache
        try:
            current_stake = self.stakes[stake_index]
        except IndexError:
            if len(self.stakes):
                message = f"Cannot divide stake - No stake exists with index {stake_index}."
            else:
                message = "Cannot divide stake - There are no active stakes."
            raise Stake.StakingError(message)

        # Calculate stake duration in periods
        if expiration:
            additional_periods = datetime_to_period(
                datetime=expiration) - current_stake.end_period
            if additional_periods <= 0:
                raise Stake.StakingError(
                    f"New expiration {expiration} must be at least 1 period from the "
                    f"current stake's end period ({current_stake.end_period})."
                )

        # Do it already!
        modified_stake, new_stake = current_stake.divide(
            target_value=target_value, additional_periods=additional_periods)

        # Update staking cache
        self.__read_stakes()

        return modified_stake, new_stake

    @only_me
    def initialize_stake(self,
                         amount: NU,
                         lock_periods: int = None,
                         expiration: maya.MayaDT = None,
                         entire_balance: bool = False) -> Stake:
        """Create a new stake."""

        #
        # Duration
        #

        if lock_periods and expiration:
            raise ValueError(
                "Pass the number of lock periods or an expiration MayaDT; not both."
            )
        if expiration:
            lock_periods = calculate_period_duration(future_time=expiration)

        #
        # Value
        #

        if entire_balance and amount:
            raise ValueError("Specify an amount or entire balance, not both")
        if entire_balance:
            amount = self.token_balance
        if self.token_balance < amount:
            raise self.MinerError(
                f"Insufficient token balance ({self.token_balance}) for new stake initialization of {amount}"
            )

        # Ensure the new stake will not exceed the staking limit
        if (self.current_stake +
                amount) > self.economics.maximum_allowed_locked:
            raise Stake.StakingError(
                f"Cannot initialize stake - Maximum stake value exceeded with a target value of {amount}."
            )

        #
        # Stake
        #

        # Write to blockchain
        new_stake = Stake.initialize_stake(miner=self,
                                           amount=amount,
                                           lock_periods=lock_periods)
        self.__read_stakes()  # Update local staking cache
        return new_stake

    #
    # Staking Cache
    #

    def __read_stakes(self) -> None:
        """Rewrite the local staking cache by reading on-chain stakes"""

        existing_records = len(self.__stakes)

        # Candidate replacement cache values
        onchain_stakes, terminal_period = list(), 0

        # Read from blockchain
        stakes_reader = self.miner_agent.get_all_stakes(
            miner_address=self.checksum_address)

        for onchain_index, stake_info in enumerate(stakes_reader):

            if not stake_info:
                # This stake index is empty on-chain
                onchain_stake = EMPTY_STAKING_SLOT

            else:
                # On-chain stake detected
                onchain_stake = Stake.from_stake_info(miner=self,
                                                      stake_info=stake_info,
                                                      index=onchain_index)

                # Search for the terminal period
                if onchain_stake.end_period > terminal_period:
                    terminal_period = onchain_stake.end_period

            # Store the replacement stake
            onchain_stakes.append(onchain_stake)

        # Commit the new stake and terminal values to the cache
        if not onchain_stakes:
            self.__stakes = NO_STAKES.bool_value(False)
        else:
            self.__terminal_period = terminal_period
            self.__stakes = onchain_stakes

        # Record most recent cache update
        self.__updated = maya.now()
        new_records = len(self.__stakes) - existing_records
        self.log.debug(
            f"Updated local staking cache ({new_records} new records).")

    def refresh_staking_cache(self) -> None:
        """Public staking cache invalidation method"""
        return self.__read_stakes()

    @property
    def stakes(self) -> List[Stake]:
        """Return all cached stake instances from the blockchain."""
        return self.__stakes

    #
    # Reward and Collection
    #

    @only_me
    def confirm_activity(self) -> str:
        """Miner rewarded for every confirmed period"""
        txhash = self.miner_agent.confirm_activity(
            node_address=self.checksum_address)
        self._transaction_cache.append((datetime.utcnow(), txhash))
        return txhash

    @only_me
    def mint(self) -> str:
        """Computes and transfers tokens to the miner's account"""
        mint_txhash = self.miner_agent.mint(node_address=self.checksum_address)
        self._transaction_cache.append((datetime.utcnow(), mint_txhash))
        return mint_txhash

    def calculate_reward(self) -> int:
        staking_reward = self.miner_agent.calculate_staking_reward(
            checksum_address=self.checksum_address)
        return staking_reward

    @only_me
    def collect_policy_reward(self,
                              collector_address=None,
                              policy_agent: PolicyAgent = None):
        """Collect rewarded ETH"""
        policy_agent = policy_agent if policy_agent is not None else PolicyAgent(
            blockchain=self.blockchain)

        withdraw_address = collector_address or self.checksum_address
        policy_reward_txhash = policy_agent.collect_policy_reward(
            collector_address=withdraw_address,
            miner_address=self.checksum_address)
        self._transaction_cache.append(
            (datetime.utcnow(), policy_reward_txhash))
        return policy_reward_txhash

    @only_me
    def collect_staking_reward(self) -> str:
        """Withdraw tokens rewarded for staking."""
        collection_txhash = self.miner_agent.collect_staking_reward(
            checksum_address=self.checksum_address)
        self._transaction_cache.append((datetime.utcnow(), collection_txhash))
        return collection_txhash
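
A minimal sketch of the staking-loop pattern above, assuming only Twisted: a LoopingCall fires on an interval, and its errback either crashes hard via the reactor or merely warns. The interval and the task body are illustrative.

from twisted.internet import reactor, task

def confirm_period():
    print('checking the current period / confirming activity')

def handle_errors(failure, abort_on_error=True):
    if abort_on_error:
        # Re-raise in the main thread so the process crashes visibly
        reactor.callFromThread(failure.raiseException)
    else:
        print('non-fatal:', failure.getErrorMessage())

staking_task = task.LoopingCall(confirm_period)
deferred = staking_task.start(60 * 60, now=True)  # hourly; fire once immediately
deferred.addErrback(handle_errors)
# reactor.run() would be required for the loop to keep firing.
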
Example no. 26
0
class CharacterConfiguration(BaseConfiguration):
    """
    'Sideways Engagement' of Character classes; a reflection of input parameters.
    """

    VERSION = 1  # bump when static payload scheme changes

    CHARACTER_CLASS = NotImplemented
    DEFAULT_CONTROLLER_PORT = NotImplemented
    DEFAULT_DOMAIN = NetworksInventory.DEFAULT
    DEFAULT_NETWORK_MIDDLEWARE = RestMiddleware
    TEMP_CONFIGURATION_DIR_PREFIX = 'tmp-nucypher'

    # Gas
    DEFAULT_GAS_STRATEGY = 'fast'

    def __init__(self,

                 # Base
                 config_root: str = None,
                 filepath: str = None,

                 # Mode
                 dev_mode: bool = False,
                 federated_only: bool = False,

                 # Identity
                 checksum_address: str = None,
                 crypto_power: CryptoPower = None,

                 # Keyring
                 keyring: NucypherKeyring = None,
                 keyring_root: str = None,

                 # Learner
                 learn_on_same_thread: bool = False,
                 abort_on_learning_error: bool = False,
                 start_learning_now: bool = True,

                 # Network
                 controller_port: int = None,
                 domains: Set[str] = None,  # TODO: Mapping between learning domains and "registry" domains - #1580
                 interface_signature: Signature = None,
                 network_middleware: RestMiddleware = None,

                 # Node Storage
                 known_nodes: set = None,
                 node_storage: NodeStorage = None,
                 reload_metadata: bool = True,
                 save_metadata: bool = True,

                 # Blockchain
                 poa: bool = False,
                 light: bool = False,
                 sync: bool = False,
                 provider_uri: str = None,
                 provider_process=None,
                 gas_strategy: Union[Callable, str] = DEFAULT_GAS_STRATEGY,

                 # Registry
                 registry: BaseContractRegistry = None,
                 registry_filepath: str = None,

                 emitter=None,
                 ):

        self.log = Logger(self.__class__.__name__)
        UNINITIALIZED_CONFIGURATION.bool_value(False)

        # Identity
        # NOTE: NodeConfigurations can only be used with Self-Characters
        self.is_me = True
        self.checksum_address = checksum_address

        # Network
        self.controller_port = controller_port or self.DEFAULT_CONTROLLER_PORT
        self.network_middleware = network_middleware or self.DEFAULT_NETWORK_MIDDLEWARE()
        self.interface_signature = interface_signature

        # Keyring
        self.crypto_power = crypto_power
        self.keyring = keyring or NO_KEYRING_ATTACHED
        self.keyring_root = keyring_root or UNINITIALIZED_CONFIGURATION

        # Contract Registry
        if registry and registry_filepath:
            if registry.filepath != registry_filepath:
                error = f"Inconsistent registry filepaths for '{registry.filepath}' and '{registry_filepath}'."
                raise ValueError(error)
            else:
                self.log.warn(f"Registry and registry filepath were both passed.")
        self.registry = registry or NO_BLOCKCHAIN_CONNECTION.bool_value(False)
        self.registry_filepath = registry_filepath or UNINITIALIZED_CONFIGURATION

        # Blockchain
        self.poa = poa
        self.is_light = light
        self.provider_uri = provider_uri or NO_BLOCKCHAIN_CONNECTION
        self.provider_process = provider_process or NO_BLOCKCHAIN_CONNECTION

        # Learner
        self.federated_only = federated_only
        self.domains = domains or {self.DEFAULT_DOMAIN}
        self.learn_on_same_thread = learn_on_same_thread
        self.abort_on_learning_error = abort_on_learning_error
        self.start_learning_now = start_learning_now
        self.save_metadata = save_metadata
        self.reload_metadata = reload_metadata
        self.known_nodes = known_nodes or set()  # handpicked

        # Configuration
        self.__dev_mode = dev_mode
        self.config_file_location = filepath or UNINITIALIZED_CONFIGURATION
        self.config_root = UNINITIALIZED_CONFIGURATION

        #
        # Federated vs. Blockchain arguments consistency
        #

        #
        # Federated
        #

        if self.federated_only:
            # Check for incompatible values
            blockchain_args = {'filepath': registry_filepath,
                               'poa': poa,
                               'provider_process': provider_process,
                               'provider_uri': provider_uri,
                               'gas_strategy': gas_strategy}
            if any(blockchain_args.values()):
                bad_args = ", ".join(f"{arg}={val}" for arg, val in blockchain_args.items() if val)
                self.log.warn(f"Arguments {bad_args} are incompatible with federated_only. "
                              f"Overridden with a sane default.")

                # Clear decentralized attributes to ensure consistency with a
                # federated configuration.
                self.poa = False
                self.is_light = False
                self.provider_uri = None
                self.provider_process = None
                self.registry_filepath = None
                self.gas_strategy = None

        #
        # Decentralized
        #

        else:
            self.gas_strategy = gas_strategy
            is_initialized = BlockchainInterfaceFactory.is_interface_initialized(provider_uri=self.provider_uri)
            if not is_initialized and provider_uri:
                BlockchainInterfaceFactory.initialize_interface(provider_uri=self.provider_uri,
                                                                poa=self.poa,
                                                                light=self.is_light,
                                                                provider_process=self.provider_process,
                                                                sync=sync,
                                                                emitter=emitter,
                                                                gas_strategy=gas_strategy)
            elif is_initialized:
                self.log.warn(f"Using existing blockchain interface connection ({self.provider_uri}).")

            if not self.registry:
                # TODO: These two code blocks are untested.
                if not self.registry_filepath:  # TODO: Registry URI  (goerli://speedynet.json) :-)
                    self.log.info(f"Fetching latest registry from source.")
                    self.registry = InMemoryContractRegistry.from_latest_publication(network=list(self.domains)[0])  # TODO: #1580
                else:
                    self.registry = LocalContractRegistry(filepath=self.registry_filepath)
                    self.log.info(f"Using local registry ({self.registry}).")

        if dev_mode:
            self.__temp_dir = UNINITIALIZED_CONFIGURATION
            self.__setup_node_storage()
            self.initialize(password=DEVELOPMENT_CONFIGURATION)
        else:
            self.__temp_dir = LIVE_CONFIGURATION
            self.config_root = config_root or self.DEFAULT_CONFIG_ROOT
            self._cache_runtime_filepaths()
            self.__setup_node_storage(node_storage=node_storage)

        super().__init__(filepath=self.config_file_location, config_root=self.config_root)

    def __call__(self, **character_kwargs):
        return self.produce(**character_kwargs)

    def update(self, **kwargs) -> None:
        """
        A facility for updating existing attributes on existing configuration instances.

        Warning: This method allows mutation and may result in an inconsistent configuration.
        """
        return super().update(modifier=self.checksum_address, filepath=self.config_file_location, **kwargs)

    @classmethod
    def generate(cls, password: str, *args, **kwargs):
        """Shortcut: Hook-up a new initial installation and write configuration file to the disk"""
        node_config = cls(dev_mode=False, *args, **kwargs)
        node_config.initialize(password=password)
        node_config.to_configuration_file()
        return node_config

    def cleanup(self) -> None:
        if self.__dev_mode:
            self.__temp_dir.cleanup()

    @property
    def dev_mode(self) -> bool:
        return self.__dev_mode

    def __setup_node_storage(self, node_storage=None) -> None:
        if self.dev_mode:
            node_storage = ForgetfulNodeStorage(registry=self.registry, federated_only=self.federated_only)
        elif not node_storage:
            node_storage = LocalFileBasedNodeStorage(registry=self.registry,
                                                     config_root=self.config_root,
                                                     federated_only=self.federated_only)
        self.node_storage = node_storage

    def forget_nodes(self) -> None:
        self.node_storage.clear()
        message = "Removed all stored node node metadata and certificates"
        self.log.debug(message)

    def destroy(self) -> None:
        """Parse a node configuration and remove all associated files from the filesystem"""
        self.attach_keyring()
        self.keyring.destroy()
        os.remove(self.config_file_location)

    def generate_parameters(self, **overrides) -> dict:
        """
        Warning: This method allows mutation and may result in an inconsistent configuration.
        """
        merged_parameters = {**self.static_payload(), **self.dynamic_payload, **overrides}
        non_init_params = ('config_root', 'poa', 'light', 'provider_uri', 'registry_filepath', 'gas_strategy')
        character_init_params = filter(lambda t: t[0] not in non_init_params, merged_parameters.items())
        return dict(character_init_params)

    def produce(self, **overrides) -> CHARACTER_CLASS:
        """Initialize a new character instance and return it."""
        merged_parameters = self.generate_parameters(**overrides)
        character = self.CHARACTER_CLASS(**merged_parameters)
        return character

    @classmethod
    def assemble(cls, filepath: str = None, **overrides) -> dict:
        """
        Warning: This method allows mutation and may result in an inconsistent configuration.
        """
        payload = cls._read_configuration_file(filepath=filepath)
        node_storage = cls.load_node_storage(storage_payload=payload['node_storage'],
                                             federated_only=payload['federated_only'])
        domains = set(payload['domains'])

        # Assemble
        payload.update(dict(node_storage=node_storage, domains=domains))
        # Filter out None values from **overrides to detect, well, overrides...
        # Acts as a shim for optional CLI flags.
        overrides = {k: v for k, v in overrides.items() if v is not None}
        payload = {**payload, **overrides}
        return payload

    @classmethod
    def from_configuration_file(cls,
                                filepath: str = None,
                                provider_process=None,
                                **overrides  # < ---- Inlet for CLI Flags
                                ) -> 'CharacterConfiguration':
        """Initialize a CharacterConfiguration from a JSON file."""
        filepath = filepath or cls.default_filepath()
        assembled_params = cls.assemble(filepath=filepath, **overrides)
        try:
            node_configuration = cls(filepath=filepath, provider_process=provider_process, **assembled_params)
        except TypeError as e:
            raise cls.ConfigurationError(e)
        return node_configuration

    def validate(self) -> bool:

        # Top-level
        if not os.path.exists(self.config_root):
            raise self.ConfigurationError(f'No configuration directory found at {self.config_root}.')

        # Sub-paths
        filepaths = self.runtime_filepaths
        for field, path in filepaths.items():
            if path and not os.path.exists(path):
                message = 'Missing configuration file or directory: {}.'
                if 'registry' in path:
                    message += ' Did you mean to pass --federated-only?'
                raise CharacterConfiguration.InvalidConfiguration(message.format(path))
        return True

    def static_payload(self) -> dict:
        """Exported static configuration values for initializing Ursula"""

        payload = dict(

            # Identity
            federated_only=self.federated_only,
            checksum_address=self.checksum_address,
            keyring_root=self.keyring_root,

            # Behavior
            domains=list(self.domains),  # From Set
            learn_on_same_thread=self.learn_on_same_thread,
            abort_on_learning_error=self.abort_on_learning_error,
            start_learning_now=self.start_learning_now,
            save_metadata=self.save_metadata,
            node_storage=self.node_storage.payload(),
        )

        # Optional values (mode)
        if not self.federated_only:
            if self.provider_uri:
                payload.update(dict(provider_uri=self.provider_uri, poa=self.poa, light=self.is_light))
            if self.registry_filepath:
                payload.update(dict(registry_filepath=self.registry_filepath))

            # Gas Price
            payload.update(dict(gas_strategy=self.gas_strategy))

        # Merge with base payload
        base_payload = super().static_payload()
        base_payload.update(payload)

        return base_payload

    @property  # TODO: Graduate to a method and "derive" dynamic from static payload.
    def dynamic_payload(self) -> dict:
        """Exported dynamic configuration values for initializing Ursula"""
        payload = dict()
        if not self.federated_only:
            payload.update(dict(registry=self.registry))

        payload.update(dict(network_middleware=self.network_middleware or self.DEFAULT_NETWORK_MIDDLEWARE(),
                            known_nodes=self.known_nodes,
                            node_storage=self.node_storage,
                            crypto_power_ups=self.derive_node_power_ups()))
        return payload

    def generate_filepath(self, filepath: str = None, modifier: str = None, override: bool = False) -> str:
        modifier = modifier or self.checksum_address
        filepath = super().generate_filepath(filepath=filepath, modifier=modifier, override=override)
        return filepath

    @property
    def runtime_filepaths(self) -> dict:
        filepaths = dict(config_root=self.config_root,
                         keyring_root=self.keyring_root,
                         registry_filepath=self.registry_filepath)
        return filepaths

    @classmethod
    def generate_runtime_filepaths(cls, config_root: str) -> dict:
        """Dynamically generate paths based on configuration root directory"""
        filepaths = dict(config_root=config_root,
                         config_file_location=os.path.join(config_root, cls.generate_filename()),
                         keyring_root=os.path.join(config_root, 'keyring'))
        return filepaths

    def _cache_runtime_filepaths(self) -> None:
        """Generate runtime filepaths and cache them on the config object"""
        filepaths = self.generate_runtime_filepaths(config_root=self.config_root)
        for field, filepath in filepaths.items():
            if getattr(self, field) is UNINITIALIZED_CONFIGURATION:
                setattr(self, field, filepath)

    def attach_keyring(self, checksum_address: str = None, *args, **kwargs) -> None:
        account = checksum_address or self.checksum_address
        if not account:
            raise self.ConfigurationError("No account specified to unlock keyring")
        if self.keyring is not NO_KEYRING_ATTACHED:
            if self.keyring.checksum_address != account:
                raise self.ConfigurationError("There is already a keyring attached to this configuration.")
            return
        self.keyring = NucypherKeyring(keyring_root=self.keyring_root, account=account, *args, **kwargs)

    def derive_node_power_ups(self) -> List[CryptoPowerUp]:
        power_ups = list()
        if self.is_me and not self.dev_mode:
            for power_class in self.CHARACTER_CLASS._default_crypto_powerups:
                power_up = self.keyring.derive_crypto_power(power_class)
                power_ups.append(power_up)
        return power_ups

    def initialize(self, password: str) -> str:
        """Initialize a new configuration and write installation files to disk."""

        # Development
        if self.dev_mode:
            self.__temp_dir = TemporaryDirectory(prefix=self.TEMP_CONFIGURATION_DIR_PREFIX)
            self.config_root = self.__temp_dir.name

        # Persistent
        else:
            self._ensure_config_root_exists()
            self.write_keyring(password=password)

        self._cache_runtime_filepaths()
        self.node_storage.initialize()

        # Validate
        if not self.__dev_mode:
            self.validate()

        # Success
        message = "Created nucypher installation files at {}".format(self.config_root)
        self.log.debug(message)
        return self.config_root

    def write_keyring(self, password: str, checksum_address: str = None, **generation_kwargs) -> NucypherKeyring:

        if self.federated_only:
            checksum_address = FEDERATED_ADDRESS

        elif not checksum_address:

            # Note: It is assumed the blockchain interface is not yet connected.
            if self.provider_process:

                # Generate Geth's "datadir"
                if not os.path.exists(self.provider_process.data_dir):
                    os.mkdir(self.provider_process.data_dir)

                # Get or create wallet address
                if not self.checksum_address:
                    self.checksum_address = self.provider_process.ensure_account_exists(password=password)
                elif self.checksum_address not in self.provider_process.accounts():
                    raise self.ConfigurationError(f'Unknown Account {self.checksum_address}')

            elif not self.checksum_address:
                raise self.ConfigurationError('No checksum address provided for decentralized configuration.')

            checksum_address = self.checksum_address

        self.keyring = NucypherKeyring.generate(password=password,
                                                keyring_root=self.keyring_root,
                                                checksum_address=checksum_address,
                                                **generation_kwargs)

        if self.federated_only:
            self.checksum_address = self.keyring.checksum_address

        return self.keyring

    @classmethod
    def load_node_storage(cls, storage_payload: dict, federated_only: bool):
        from nucypher.config.storages import NodeStorage
        node_storage_subclasses = {storage._name: storage for storage in NodeStorage.__subclasses__()}
        storage_type = storage_payload[NodeStorage._TYPE_LABEL]
        storage_class = node_storage_subclasses[storage_type]
        node_storage = storage_class.from_payload(payload=storage_payload, federated_only=federated_only)
        return node_storage
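
A minimal, self-contained sketch of the payload merge performed by generate_parameters: later sources win, then keys that are not character constructor parameters are filtered out. All keys and values below are illustrative.

static_payload = {'federated_only': True, 'gas_strategy': 'fast', 'config_root': '/tmp/example'}
dynamic_payload = {'known_nodes': set(), 'crypto_power_ups': []}
overrides = {'federated_only': False}

# Later sources win: overrides > dynamic > static
merged = {**static_payload, **dynamic_payload, **overrides}

# Strip keys the character constructor does not accept
non_init_params = ('config_root', 'gas_strategy')
character_init_params = {k: v for k, v in merged.items() if k not in non_init_params}

assert character_init_params == {
    'federated_only': False,
    'known_nodes': set(),
    'crypto_power_ups': [],
}
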
Example no. 27
0
class WorkTracker:

    CLOCK = reactor
    REFRESH_RATE = 60 * 15  # Fifteen minutes

    def __init__(self, worker, refresh_rate: int = None, *args, **kwargs):

        super().__init__(*args, **kwargs)
        self.log = Logger('stake-tracker')
        self.worker = worker
        self.staking_agent = self.worker.staking_agent

        self._refresh_rate = refresh_rate or self.REFRESH_RATE
        self._tracking_task = task.LoopingCall(self._do_work)
        self._tracking_task.clock = self.CLOCK

        self.__requirement = None
        self.__current_period = None
        self.__start_time = NOT_STAKING
        self.__uptime_period = NOT_STAKING
        self._abort_on_error = True

    @property
    def current_period(self):
        return self.__current_period

    def stop(self) -> None:
        if self._tracking_task.running:
            self._tracking_task.stop()
            self.log.info(f"STOPPED WORK TRACKING")

    def start(self,
              act_now: bool = False,
              requirement_func: Callable = None,
              force: bool = False) -> None:
        """
        High-level stake tracking initialization, this function aims
        to be safely called at any time - For example, it is okay to call
        this function multiple times within the same period.
        """
        if self._tracking_task.running and not force:
            return

        # Add optional confirmation requirement callable
        self.__requirement = requirement_func

        # Record the start time and period
        self.__start_time = maya.now()
        self.__uptime_period = self.staking_agent.get_current_period()
        self.__current_period = self.__uptime_period

        self.log.info(f"START WORK TRACKING")
        d = self._tracking_task.start(interval=self._refresh_rate, now=act_now)
        d.addErrback(self.handle_working_errors)

    def _crash_gracefully(self, failure=None) -> None:
        """
        A facility for crashing more gracefully in the event that
        an exception is unhandled in a different thread.
        """
        self._crashed = failure
        failure.raiseException()

    def handle_working_errors(self, *args, **kwargs) -> None:
        failure = args[0]
        if self._abort_on_error:
            self.log.critical(
                'Unhandled error during node work tracking. {failure!r}',
                failure=failure)
            reactor.callFromThread(self._crash_gracefully, failure=failure)
        else:
            self.log.warn(
                'Unhandled error during work tracking: {failure.getTraceback()!r}',
                failure=failure)

    def __check_work_requirement(self) -> bool:
        # TODO: Check for stake expiration and exit
        if self.__requirement is None:
            return True
        try:
            r = self.__requirement()
            if not isinstance(r, bool):
                raise ValueError("'requirement' must return a boolean.")
        except TypeError:
            raise ValueError("'requirement' must be a callable.")
        return r

    def _do_work(self) -> None:
        # TODO: #1515 Shut down at end of terminal stake

        # Update on-chain status
        self.log.info(
            f"Checking for new period. Current period is {self.__current_period}"
        )
        # Read the current period from the contract
        onchain_period = self.staking_agent.get_current_period()
        if self.current_period != onchain_period:
            self.__current_period = onchain_period
            # self.worker.stakes.refresh()  # TODO: #1517 Track stakes for fast access to terminal period.

        # Measure working interval
        interval = onchain_period - self.worker.last_committed_period
        if interval < 0:
            return  # No need to commit to this period.  Save the gas.
        if interval > 0:
            # TODO: #1516 Follow-up actions for downtime
            self.log.warn(
                f"MISSED COMMITMENTS - {interval} missed staking commitments detected."
            )

        # Only perform work this round if the requirements are met
        if not self.__check_work_requirement():
            self.log.warn(
                f'COMMIT PREVENTED (callable: "{self.__requirement.__name__}") - '
                f'There are unmet commit requirements.')
            # TODO: Follow-up actions for downtime
            return

        # Make a Commitment
        self.log.info("Made a commitment to period {}".format(
            self.current_period))
        transacting_power = self.worker.transacting_power
        with transacting_power:
            self.worker.commit_to_next_period()  # < --- blockchain WRITE
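
A minimal, self-contained sketch of the optional requirement gate used by WorkTracker: a user-supplied callable may veto a commitment for the current round, and non-boolean results are rejected. The example requirements are illustrative.

def check_work_requirement(requirement):
    # None means "no requirement": always allowed to commit
    if requirement is None:
        return True
    result = requirement()
    if not isinstance(result, bool):
        raise ValueError("'requirement' must return a boolean.")
    return result

assert check_work_requirement(None) is True
assert check_work_requirement(lambda: True) is True
assert check_work_requirement(lambda: False) is False
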
Example no. 28
0
class WebSocketDispatcherProtocol(Protocol):
    """Protocol implementing ShinySDR's WebSocket service.
    
    This protocol's transport should be a txWS WebSocket transport.
    """
    def __init__(self, caps, subscription_context):
        self.__log = Logger()
        self.__subscription_context = subscription_context
        self._caps = caps
        self._seenValues = {}
        self.inner = None

    def dataReceived(self, data):
        """Twisted Protocol implementation.
        
        Additionally, txWS takes no care with exceptions here, so we catch and log."""
        # pylint: disable=broad-except
        try:
            if self.inner is None:
                # To work around txWS's lack of a notification when the URL is available, all clients send a dummy first message.
                self.__dispatch_url()
            else:
                self.inner.dataReceived(data)
        except Exception:
            self.__log.failure('Error processing incoming WebSocket message')

    def __dispatch_url(self):
        self.__log.info('Stream connection to {url}',
                        url=self.transport.location)
        _scheme, _netloc, path_bytes, _params, query_bytes, _fragment = urlparse(
            bytes_or_ascii(self.transport.location))
        # py2/3: unquote returns str in either version but we want Unicode
        path = [
            six.text_type(urllib.parse.unquote(x))
            for x in path_bytes.split(b'/')
        ]
        assert path[0] == ''
        path[0:1] = []
        cap_string = path[0]
        if cap_string in self._caps:
            root_object = self._caps[cap_string]
            path[0:1] = []
        else:
            raise Exception('Unknown cap')  # TODO better error reporting
        if path == [AUDIO_STREAM_PATH_ELEMENT]:
            options = parse_audio_stream_options(parse_qs(query_bytes, 1))
            self.inner = AudioStreamInner(the_reactor, self.__send,
                                          root_object, options.sample_rate)
        elif len(path) >= 1 and path[0] == CAP_OBJECT_PATH_ELEMENT:
            # note _lookup_block may throw. TODO: Better error reporting
            root_object = _lookup_block(root_object, path[1:])
            self.inner = StateStreamInner(
                self.__send, root_object, path_bytes.decode('utf-8'),
                self.__subscription_context
            )  # note reuse of WS path as HTTP path; probably will regret this
        else:
            raise Exception('Unknown path: %r' % (path, ))

    def connectionMade(self):
        """twisted Protocol implementation"""
        self.transport.setBinaryMode(True)
        # Unfortunately, txWS calls this too soon for transport.location to be available

    def connectionLost(self, reason):
        # pylint: disable=signature-differs
        """twisted Protocol implementation"""
        if self.inner is not None:
            self.inner.connectionLost(reason)

    def __send(self, message, safe_to_drop=False):
        if len(self.transport.transport.dataBuffer) > 1000000:
            # TODO: condition is horrible implementation-diving kludge
            # Don't accumulate indefinite buffer if we aren't successfully getting it onto the network.

            # TODO: There are no tests of this mechanism

            if safe_to_drop:
                self.__log.warn('Dropping data going to stream {url}',
                                url=self.transport.location)
            else:
                self.__log.error(
                    'Dropping connection due to too much data on stream {url}',
                    url=self.transport.location)
                self.transport.close(reason='Too much data buffered')
        else:
            self.transport.write(message)
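
The trickiest part of this protocol is __dispatch_url, which dissects the WebSocket location into a capability string and a residual path. That step can be exercised standalone; the capability table and sample location below are invented for illustration.

from urllib.parse import urlparse, unquote

CAPS = {'example-cap': object()}  # hypothetical capability table

def dissect(location):
    parsed = urlparse(location)
    path = [unquote(segment) for segment in parsed.path.split('/')]
    assert path[0] == ''  # the leading slash yields an empty first segment
    path.pop(0)
    cap_string = path.pop(0)
    if cap_string not in CAPS:
        raise KeyError('Unknown cap')
    return CAPS[cap_string], path, parsed.query

root_object, rest, query = dissect('/example-cap/obj/child?rate=44100')
print(rest, query)  # ['obj', 'child'] rate=44100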
Esempio n. 29
0
class BlockchainInterface:
    """
    Interacts with a solidity compiler and a registry in order to instantiate compiled
    ethereum contracts with the given web3 provider backend.
    """
    __default_timeout = 10  # seconds
    # __default_transaction_gas_limit = 500000  # TODO #842: determine sensible limit and validate transactions

    class InterfaceError(Exception):
        pass

    class NoProvider(InterfaceError):
        pass

    class ConnectionFailed(InterfaceError):
        pass

    class UnknownContract(InterfaceError):
        pass

    def __init__(self,
                 provider_uri: str = None,
                 provider=None,
                 auto_connect: bool = True,
                 timeout: int = None,
                 registry: EthereumContractRegistry = None,
                 compiler: SolidityCompiler = None) -> None:

        """
        A blockchain "network interface"; The circumflex wraps entirely around the bounds of
        contract operations including compilation, deployment, and execution.

         Filesystem          Configuration           Node              Client                  EVM
        ================ ====================== =============== =====================  ===========================

         Solidity Files -- SolidityCompiler --+                      +-- HTTPProvider ------ ...
                                              |                      |
         Registry File -- ContractRegistry ---+ *BlockchainInterface* -- IPCProvider ----- External EVM (geth, parity...)
                                              |                      |
         Runtime Files -----------------------+                      +-- TestProvider ------ EthereumTester (PyEVM)
                                              |
         Key Files ------ NodeConfiguration --+-- Agent ... (Contract API)
                                              |
         Config File -------------------------+-- Actor ... (Blockchain-Character API)
                                              |
                                              +-- Character ... (Public API)
                                                       |
                                                     Human


        The BlockchainInterface is the junction of the solidity compiler, a contract registry, and a collection of
        web3 network providers as a means of interfacing with the ethereum blockchain to execute
        or deploy contract code on the network.


        Compiler and Registry Usage
        -----------------------------

        Contracts are freshly re-compiled if an instance of SolidityCompiler is passed; otherwise,
        the registry will read contract data saved to disk, which is used to retrieve contract
        addresses and op-codes. Optionally, a registry instance can be passed instead.


        Provider Usage
        ---------------
        https://github.com/ethereum/eth-tester#available-backends


        * HTTP Provider - supply endpoint_uri
        * Websocket Provider - supply endpoint uri and websocket=True
        * IPC Provider - supply IPC path
        * Custom Provider - supply an iterable of web3.py provider instances

        """

        self.log = Logger("blockchain-interface")                       # type: Logger

        #
        # Providers
        #

        self.w3 = NO_BLOCKCHAIN_CONNECTION
        self.__provider = provider or NO_BLOCKCHAIN_CONNECTION
        self.provider_uri = NO_BLOCKCHAIN_CONNECTION
        self.timeout = timeout if timeout is not None else self.__default_timeout

        if provider_uri and provider:
            raise self.InterfaceError("Pass a provider URI string, or a list of provider instances.")
        elif provider_uri:
            self.provider_uri = provider_uri
            self.add_provider(provider_uri=provider_uri)
        elif provider:
            self.provider_uri = MANUAL_PROVIDERS_SET
            self.add_provider(provider)
        else:
            self.log.warn("No provider supplied for new blockchain interface; Using defaults")

        # if a SolidityCompiler class instance was passed, compile from solidity source code
        self.__recompile = compiler is not None
        self.__sol_compiler = compiler

        # Setup the registry and base contract factory cache
        registry = registry if registry is not None else EthereumContractRegistry()
        self.registry = registry
        self.log.info("Using contract registry {}".format(self.registry.filepath))

        if self.__recompile is True:
            # Execute the compilation if we're recompiling
            # Otherwise read compiled contract data from the registry
            interfaces = self.__sol_compiler.compile()
            __raw_contract_cache = interfaces
        else:
            __raw_contract_cache = NO_COMPILATION_PERFORMED
        self.__raw_contract_cache = __raw_contract_cache

        # Auto-connect
        self.autoconnect = auto_connect
        if self.autoconnect is True:
            self.connect()

    def __repr__(self):
        r = '{name}({uri})'.format(name=self.__class__.__name__, uri=self.provider_uri)
        return r

    def connect(self):
        self.log.info("Connecting to {}".format(self.provider_uri))

        if self.__provider is NO_BLOCKCHAIN_CONNECTION:
            raise self.NoProvider("There are no configured blockchain providers")

        # Connect
        web3_instance = Web3(provider=self.__provider)  # Instantiate Web3 object with provider
        self.w3 = web3_instance

        # Check connection
        if not self.is_connected:
            raise self.ConnectionFailed('Failed to connect to provider: {}'.format(self.__provider))

        self.log.info('Successfully connected to {}'.format(self.provider_uri))
        return self.is_connected

    @property
    def provider(self) -> Union[IPCProvider, WebsocketProvider, HTTPProvider]:
        return self.__provider

    @property
    def is_connected(self) -> bool:
        """
        https://web3py.readthedocs.io/en/stable/providers.html#examples-using-automated-detection
        """
        return self.w3.isConnected()

    @property
    def node_version(self) -> str:
        """Return node version information"""
        return self.w3.node_version.node

    def add_provider(self,
                     provider: Union[IPCProvider, WebsocketProvider, HTTPProvider] = None,
                     provider_uri: str = None,
                     timeout: int = None) -> None:

        if not provider_uri and not provider:
            raise self.NoProvider("No URI or provider instances supplied.")

        if provider_uri and not provider:
            uri_breakdown = urlparse(provider_uri)

            # PyEVM
            if uri_breakdown.scheme == 'tester':

                if uri_breakdown.netloc == 'pyevm':
                    from nucypher.utilities.sandbox.constants import PYEVM_GAS_LIMIT
                    genesis_params = PyEVMBackend._generate_genesis_params(overrides={'gas_limit': PYEVM_GAS_LIMIT})
                    pyevm_backend = PyEVMBackend(genesis_parameters=genesis_params)
                    pyevm_backend.reset_to_genesis(genesis_params=genesis_params,
                                                   num_accounts=NUMBER_OF_ETH_TEST_ACCOUNTS)
                    eth_tester = EthereumTester(backend=pyevm_backend, auto_mine_transactions=True)
                    provider = EthereumTesterProvider(ethereum_tester=eth_tester)
                elif uri_breakdown.netloc == 'geth':
                    # Hardcoded gethdev IPC provider
                    provider = IPCProvider(ipc_path='/tmp/geth.ipc', timeout=timeout)

                else:
                    raise ValueError("{} is an invalid or unsupported blockchain provider URI".format(provider_uri))

            # IPC
            elif uri_breakdown.scheme == 'ipc':
                provider = IPCProvider(ipc_path=uri_breakdown.path, timeout=timeout)

            # Websocket
            elif uri_breakdown.scheme == 'ws':
                provider = WebsocketProvider(endpoint_uri=provider_uri)

            # HTTP
            elif uri_breakdown.scheme in ('http', 'https'):
                provider = HTTPProvider(endpoint_uri=provider_uri)

            else:
                raise self.InterfaceError("'{}' is not a blockchain provider protocol".format(uri_breakdown.scheme))

            self.__provider = provider

    def get_contract_factory(self, contract_name: str) -> Contract:
        """Retrieve compiled interface data from the cache and return web3 contract"""
        try:
            interface = self.__raw_contract_cache[contract_name]
        except KeyError:
            raise self.UnknownContract('{} is not a locally compiled contract.'.format(contract_name))
        except TypeError:
            if self.__raw_contract_cache is NO_COMPILATION_PERFORMED:
                message = "The local contract compiler cache is empty because no compilation was performed."
                raise self.InterfaceError(message)
            raise
        else:
            contract = self.w3.eth.contract(abi=interface['abi'],
                                            bytecode=interface['bin'],
                                            ContractFactoryClass=Contract)
            return contract

    def _wrap_contract(self, wrapper_contract: Contract,
                       target_contract: Contract, factory=Contract) -> Contract:
        """
        Used for upgradeable contracts;
        Returns a new contract object assembled with the address of one contract but the ABI of another.
        """

        # Wrap the contract
        wrapped_contract = self.w3.eth.contract(abi=target_contract.abi,
                                                address=wrapper_contract.address,
                                                ContractFactoryClass=factory)
        return wrapped_contract

    def get_contract_by_address(self, address: str):
        """Read a single contract's data from the registrar and return it."""
        try:
            contract_records = self.registry.search(contract_address=address)
        except RuntimeError:
            raise self.InterfaceError('Corrupted Registrar')  # TODO #461: Integrate with Registry
        else:
            if not contract_records:
                raise self.UnknownContract("No such contract with address {}".format(address))
            return contract_records[0]

    def get_contract_by_name(self,
                             name: str,
                             proxy_name: str = None,
                             use_proxy_address: bool = True,
                             factory: Contract = Contract) -> Contract:
        """
        Instantiate a deployed contract from registry data,
        and assimilate it with its proxy if it is upgradeable.
        """

        target_contract_records = self.registry.search(contract_name=name)

        if not target_contract_records:
            raise self.UnknownContract("No such contract records with name {}".format(name))

        if proxy_name:  # It's upgradeable
            # Lookup proxies; Search for a published proxy that targets this contract record

            proxy_records = self.registry.search(contract_name=proxy_name)

            unified_pairs = list()
            for proxy_name, proxy_addr, proxy_abi in proxy_records:
                proxy_contract = self.w3.eth.contract(abi=proxy_abi,
                                                      address=proxy_addr,
                                                      ContractFactoryClass=factory)

                # Read this dispatcher's target address from the blockchain
                proxy_live_target_address = proxy_contract.functions.target().call()
                for target_name, target_addr, target_abi in target_contract_records:

                    if target_addr == proxy_live_target_address:
                        if use_proxy_address:
                            pair = (proxy_addr, target_abi)
                        else:
                            pair = (proxy_live_target_address, target_abi)
                    else:
                        continue

                    unified_pairs.append(pair)

            if len(unified_pairs) > 1:
                address, abi = unified_pairs[0]
                message = "Multiple {} deployments are targeting {}".format(proxy_name, address)
                raise self.InterfaceError(message)

            else:
                selected_address, selected_abi = unified_pairs[0]

        else:  # It's not upgradeable
            if len(target_contract_records) != 1:
                m = "Multiple records registered for non-upgradeable contract {}"
                raise self.InterfaceError(m.format(name))
            _target_contract_name, selected_address, selected_abi = target_contract_records[0]

        # Create the contract from selected sources
        unified_contract = self.w3.eth.contract(abi=selected_abi,
                                                address=selected_address,
                                                ContractFactoryClass=factory)

        return unified_contract

    def call_backend_sign(self, account: str, message: bytes) -> str:
        """
        Calls the appropriate signing function for the specified account on the
        backend. If the backend is based on eth-tester, then it uses the
        eth-tester signing interface to do so.
        """
        provider = self.provider
        if isinstance(provider, EthereumTesterProvider):
            address = to_canonical_address(account)
            sig_key = provider.ethereum_tester.backend._key_lookup[address]
            signed_message = sig_key.sign_msg(message)
            return signed_message
        else:
            return self.w3.eth.sign(account, data=message)  # TODO: Technically deprecated...

    def call_backend_verify(self, pubkey: PublicKey, signature: Signature, msg_hash: bytes):
        """
        Verifies a hex string signature and message hash are from the provided
        public key.
        """
        is_valid_sig = signature.verify_msg_hash(msg_hash, pubkey)
        sig_pubkey = signature.recover_public_key_from_msg_hash(msg_hash)

        return is_valid_sig and (sig_pubkey == pubkey)

    def unlock_account(self, address, password, duration):
        if 'tester' in self.provider_uri:
            return True  # Test accounts are unlocked by default.
        return self.w3.geth.personal.unlockAccount(address, password, duration)
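
The scheme-to-provider dispatch inside add_provider() reduces to a small lookup. A standalone sketch, assuming the web3.py v4/v5-era API used throughout this listing is installed; the endpoint URI is hypothetical.

from urllib.parse import urlparse

from web3 import Web3, HTTPProvider, IPCProvider, WebsocketProvider

def provider_from_uri(provider_uri, timeout=10):
    # Mirrors the ipc/ws/http branches of add_provider() above.
    uri_breakdown = urlparse(provider_uri)
    if uri_breakdown.scheme == 'ipc':
        return IPCProvider(ipc_path=uri_breakdown.path, timeout=timeout)
    elif uri_breakdown.scheme == 'ws':
        return WebsocketProvider(endpoint_uri=provider_uri)
    elif uri_breakdown.scheme in ('http', 'https'):
        return HTTPProvider(endpoint_uri=provider_uri)
    raise ValueError("'{}' is not a supported provider scheme".format(uri_breakdown.scheme))

w3 = Web3(provider_from_uri('http://localhost:8545'))
print(w3.isConnected())  # False unless a node is actually listening there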
Esempio n. 30
0
class Worker(NucypherTokenActor):
    """
    Ursula baseclass for blockchain operations, practically carrying a pickaxe.
    """
    class WorkerError(NucypherTokenActor.ActorError):
        pass

    class DetachedWorker(WorkerError):
        """Raised when the worker address is not assigned an on-chain stake in the StakingEscrow contract."""

    def __init__(self,
                 is_me: bool,
                 stake_tracker: StakeTracker = None,
                 worker_address: str = None,
                 start_working_loop: bool = True,
                 *args,
                 **kwargs) -> None:

        super().__init__(*args, **kwargs)

        self.log = Logger("worker")

        self.__worker_address = worker_address
        self.is_me = is_me

        # Agency
        self.token_agent = NucypherTokenAgent(blockchain=self.blockchain)
        self.staking_agent = StakingEscrowAgent(blockchain=self.blockchain)

        # Stakes
        self.__start_time = WORKER_NOT_RUNNING
        self.__uptime_period = WORKER_NOT_RUNNING

        # Workers cannot be started without being assigned a stake first.
        if is_me:
            self.stake_tracker = stake_tracker or StakeTracker(
                checksum_addresses=[self.checksum_address])

            if not self.stake_tracker.stakes(
                    checksum_address=self.checksum_address):
                raise self.DetachedWorker
            else:
                self.stake_tracker.add_action(self._confirm_period)
                if start_working_loop:
                    self.stake_tracker.start()

    @property
    def last_active_period(self) -> int:
        period = self.staking_agent.get_last_active_period(
            address=self.checksum_address)
        return period

    @only_me
    @save_receipt
    def confirm_activity(self) -> str:
        """For each period that the worker confirms activity, the staker is rewarded"""
        receipt = self.staking_agent.confirm_activity(
            worker_address=self.__worker_address)
        return receipt

    @only_me
    def _confirm_period(self) -> None:
        # TODO: Follow-up actions for downtime
        # TODO: Check for stake expiration and exit
        missed_periods = self.stake_tracker.current_period - self.last_active_period
        if missed_periods:
            self.log.warn(
                f"MISSED CONFIRMATIONS - {missed_periods} missed staking confirmations detected!"
            )
        self.confirm_activity()  # < --- blockchain WRITE
        self.log.info("Confirmed activity for period {}".format(
            self.stake_tracker.current_period))
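
The downtime check in _confirm_period() is plain period arithmetic. Reduced to its essentials, with invented period numbers:

current_period = 120       # as reported by the staking agent
last_active_period = 117   # last period this worker confirmed

missed_periods = current_period - last_active_period
if missed_periods:
    print(f"MISSED CONFIRMATIONS - {missed_periods} missed staking confirmations detected!")
# confirm_activity() -- the blockchain WRITE -- would follow here.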
Esempio n. 31
0
class PhotometerService(ClientService):
    def __init__(self, options, reference):

        self.options = options
        self.namespace = 'ref.' if reference else 'test'
        self.label = self.namespace.upper()
        setLogLevel(namespace=self.label, levelStr=options['log_messages'])
        setLogLevel(namespace=self.namespace, levelStr=options['log_level'])
        self.log = Logger(namespace=self.namespace)
        self.reference = reference  # Flag, is this instance for the reference photometer
        self.factory = self.buildFactory()
        self.protocol = None
        self.serport = None
        self.info = None  # Photometer info
        self.buffer = CircularBuffer(options['size'], self.log)
        parts = chop(self.options['endpoint'], sep=':')
        if parts[0] == 'tcp':
            endpoint = clientFromString(reactor, self.options['endpoint'])
            ClientService.__init__(self,
                                   endpoint,
                                   self.factory,
                                   retryPolicy=backoffPolicy(initialDelay=0.5,
                                                             factor=3.0))

    @inlineCallbacks
    def startService(self):
        '''
        Starts the photometer service and listens to a TESS.
        Although it is technically a synchronous operation, it works well
        with inline callbacks.
        '''
        self.log.info("starting {name} service", name=self.name)
        yield self.connect()
        self.info = yield self.getInfo()
        if self.reference:
            returnValue(None)
        # Now this is for the test photometer only
        if self.options['dry_run']:
            self.log.info('Dry run. Will stop here ...')
            yield self.stopService()
        elif self.info is None:
            yield self.stopService()
        elif self.options['zero_point'] is not None:
            try:
                result = yield self.protocol.writeZeroPoint(
                    self.options['zero_point'])
            except Exception as e:
                self.log.error("Timeout when updating Zero Point")
                self.log.failure("{excp}", excp=e)
            else:
                self.log.info("[{label}] Writen ZP : {zp:0.2f}",
                              label=self.label,
                              zp=result['zp'])
            finally:
                yield self.stopService()

    def stopService(self):
        self.log.info("stopping {name} service", name=self.name)
        try:
            reactor.callLater(0, reactor.stop)
        except Exception as e:
            self.log.error("could not stop the reactor: {excp}", excp=e)
        return defer.succeed(None)

    # --------------
    # Photometer API
    # --------------

    def writeZeroPoint(self, zero_point):
        '''Writes Zero Point to the device. Returns a Deferred'''
        return self.protocol.writeZeroPoint(zero_point)

    def getPhotometerInfo(self):
        if self.protocol is None:
            self.log.warn("Requested photometer info but no protocol yet!")
            return defer.fail(RuntimeError("no protocol yet"))
        if self.info is None:
            return self.getInfo()
        else:
            return defer.succeed(self.info)

    # --------------
    # Helper methods
    # ---------------

    @inlineCallbacks
    def connect(self):
        parts = chop(self.options['endpoint'], sep=':')
        if parts[0] == 'serial':
            endpoint = parts[1:]
            self.protocol = self.factory.buildProtocol(0)
            try:
                self.serport = SerialPort(self.protocol,
                                          endpoint[0],
                                          reactor,
                                          baudrate=endpoint[1])
            except Exception as e:
                self.log.error("{excp}", excp=e)
                yield self.stopService()
            else:
                self.gotProtocol(self.protocol)
                self.log.info("Using serial port {tty} at {baud} bps",
                              tty=endpoint[0],
                              baud=endpoint[1])
        else:
            ClientService.startService(self)
            try:
                protocol = yield self.whenConnected(failAfterFailures=1)
            except Exception as e:
                self.log.error("{excp}", excp=e)
                yield self.stopService()
            else:
                self.gotProtocol(protocol)
                self.log.info("Using TCP endpoint {endpoint}",
                              endpoint=self.options['endpoint'])

    @inlineCallbacks
    def getInfo(self):
        try:
            info = yield self.protocol.readPhotometerInfo()
        except Exception as e:
            self.log.error("Timeout when reading photometer info")
            info = self.fixIt()
            returnValue(info)  # May be None
        else:
            info['model'] = self.options['model']
            info['label'] = self.label
            self.log.info("[{label}] Model     : {value}",
                          label=self.label,
                          value=info['model'])
            self.log.info("[{label}] Name      : {value}",
                          label=self.label,
                          value=info['name'])
            self.log.info("[{label}] MAC       : {value}",
                          label=self.label,
                          value=info['mac'])
            self.log.info("[{label}] Zero Point: {value:.02f} (old)",
                          label=self.label,
                          value=info['zp'])
            self.log.info("[{label}] Firmware  : {value}",
                          label=self.label,
                          value=info['firmware'])
            returnValue(info)

    def fixIt(self):
        parts = chop(self.options['endpoint'], sep=':')
        if self.reference and (self.options['model']
                               == TESSW) and parts[0] == 'serial':
            info = {
                'model': TESSW,
                'label': self.label,
                'name': self.options['name'],
                'mac': self.options['mac'],
                'zp': 20.50,
                'firmware': "",
            }
            self.log.error("Fixed photometer info with defaults {info}",
                           info=info)
            return info
        else:
            return None

    def limitedStart(self):
        '''Detects the case where only the Test photometer service is started'''
        if self.reference:
            return False
        return (self.options['dry_run']
                or self.options['zero_point'] is not None)

    def buildFactory(self):
        if self.options['model'] == TESSW:
            self.log.debug("Choosing a {model} factory", model=TESSW)
            import zptess.tessw
            factory = zptess.tessw.TESSProtocolFactory(self.label)
        elif self.options['model'] == TESSP:
            self.log.debug("Choosing a {model} factory", model=TESSP)
            import zptess.tessp
            factory = zptess.tessp.TESSProtocolFactory(self.label)
        else:
            self.log.debug("Choosing a {model} factory", model=TAS)
            import zptess.tas
            factory = zptess.tas.TESSProtocolFactory(self.label)
        return factory

    def gotProtocol(self, protocol):
        self.log.debug("got protocol")
        protocol.setContext(self.options['endpoint'])
        self.buffer.registerProducer(protocol, True)
        if self.limitedStart():
            protocol.stopProducing()  # We don't need to feed messages to the buffer
        self.protocol = protocol
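
connect() branches on the first field of a colon-separated endpoint string. A standalone sketch of that split, assuming chop() in the original simply splits on ':'; the endpoint strings are examples only.

from twisted.internet import reactor
from twisted.internet.endpoints import clientFromString

def parse_endpoint(spec):
    parts = spec.split(':')
    if parts[0] == 'tcp':
        # TCP endpoints go through Twisted's endpoint parser as-is.
        return 'tcp', clientFromString(reactor, spec)
    elif parts[0] == 'serial':
        # Serial endpoints are split by hand into (tty, baudrate).
        return 'serial', (parts[1], int(parts[2]))
    raise ValueError('Unknown endpoint spec: {}'.format(spec))

print(parse_endpoint('serial:/dev/ttyUSB0:9600'))
print(parse_endpoint('tcp:192.168.4.1:23')[0])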
Esempio n. 32
0
class NodeConfiguration(ABC):
    """
    'Sideways Engagement' of Character classes; a reflection of input parameters.
    """

    # Abstract
    _NAME = NotImplemented
    _CHARACTER_CLASS = NotImplemented
    CONFIG_FILENAME = NotImplemented
    DEFAULT_CONFIG_FILE_LOCATION = NotImplemented

    # Mode
    DEFAULT_OPERATING_MODE = 'decentralized'

    # Domains
    DEFAULT_DOMAIN = GLOBAL_DOMAIN

    # Serializers
    NODE_SERIALIZER = binascii.hexlify
    NODE_DESERIALIZER = binascii.unhexlify

    # System
    __CONFIG_FILE_EXT = '.config'
    __CONFIG_FILE_DESERIALIZER = json.loads
    TEMP_CONFIGURATION_DIR_PREFIX = "nucypher-tmp-"

    # Blockchain
    DEFAULT_PROVIDER_URI = 'tester://pyevm'

    # Registry
    __REGISTRY_NAME = 'contract_registry.json'
    REGISTRY_SOURCE = os.path.join(
        BASE_DIR, __REGISTRY_NAME)  # TODO: #461 Where will this be hosted?

    # Rest + TLS
    DEFAULT_REST_HOST = '127.0.0.1'
    DEFAULT_REST_PORT = 9151
    DEFAULT_DEVELOPMENT_REST_PORT = 10151
    __DEFAULT_TLS_CURVE = ec.SECP384R1
    __DEFAULT_NETWORK_MIDDLEWARE_CLASS = RestMiddleware

    class ConfigurationError(RuntimeError):
        pass

    class InvalidConfiguration(ConfigurationError):
        pass

    def __init__(
            self,

            # Base
            config_root: str = None,
            config_file_location: str = None,

            # Mode
            dev_mode: bool = False,
            federated_only: bool = False,

            # Identity
            is_me: bool = True,
            checksum_public_address: str = None,
            crypto_power: CryptoPower = None,

            # Keyring
            keyring: NucypherKeyring = None,
            keyring_dir: str = None,

            # Learner
            learn_on_same_thread: bool = False,
            abort_on_learning_error: bool = False,
            start_learning_now: bool = True,

            # REST
            rest_host: str = None,
            rest_port: int = None,

            # TLS
            tls_curve: EllipticCurve = None,
            certificate: Certificate = None,

            # Network
            domains: Set[str] = None,
            interface_signature: Signature = None,
            network_middleware: RestMiddleware = None,

            # Node Storage
            known_nodes: set = None,
            node_storage: NodeStorage = None,
            reload_metadata: bool = True,
            save_metadata: bool = True,

            # Blockchain
            poa: bool = False,
            provider_uri: str = None,

            # Registry
            registry_source: str = None,
            registry_filepath: str = None,
            import_seed_registry: bool = False  # TODO: needs cleanup
    ) -> None:

        # Logs
        self.log = Logger(self.__class__.__name__)

        #
        # REST + TLS (Ursula)
        #
        self.rest_host = rest_host or self.DEFAULT_REST_HOST
        default_port = (self.DEFAULT_DEVELOPMENT_REST_PORT
                        if dev_mode else self.DEFAULT_REST_PORT)
        self.rest_port = rest_port or default_port
        self.tls_curve = tls_curve or self.__DEFAULT_TLS_CURVE
        self.certificate = certificate

        self.interface_signature = interface_signature
        self.crypto_power = crypto_power

        #
        # Keyring
        #
        self.keyring = keyring or NO_KEYRING_ATTACHED
        self.keyring_dir = keyring_dir or UNINITIALIZED_CONFIGURATION

        # Contract Registry
        if import_seed_registry is True:
            registry_source = self.REGISTRY_SOURCE
            if not os.path.isfile(registry_source):
                message = "Seed contract registry does not exist at path {}.".format(
                    registry_filepath)
                self.log.debug(message)
                raise RuntimeError(message)
        self.__registry_source = registry_source or self.REGISTRY_SOURCE
        self.registry_filepath = registry_filepath or UNINITIALIZED_CONFIGURATION

        #
        # Configuration
        #
        self.config_file_location = config_file_location or UNINITIALIZED_CONFIGURATION
        self.config_root = UNINITIALIZED_CONFIGURATION

        #
        # Mode
        #
        self.federated_only = federated_only
        self.__dev_mode = dev_mode

        if self.__dev_mode:
            self.__temp_dir = UNINITIALIZED_CONFIGURATION
            self.node_storage = ForgetfulNodeStorage(
                federated_only=federated_only, character_class=self.__class__)
        else:
            self.__temp_dir = LIVE_CONFIGURATION
            self.config_root = config_root or DEFAULT_CONFIG_ROOT
            self._cache_runtime_filepaths()
            self.node_storage = node_storage or LocalFileBasedNodeStorage(
                federated_only=federated_only, config_root=self.config_root)

        # Domains
        self.domains = domains or {self.DEFAULT_DOMAIN}

        #
        # Identity
        #
        self.is_me = is_me
        self.checksum_public_address = checksum_public_address

        if self.is_me is True or dev_mode is True:
            # Self
            if self.checksum_public_address and dev_mode is False:
                self.attach_keyring()
            self.network_middleware = network_middleware or self.__DEFAULT_NETWORK_MIDDLEWARE_CLASS()

        else:
            # Stranger
            self.node_storage = STRANGER_CONFIGURATION
            self.keyring_dir = STRANGER_CONFIGURATION
            self.keyring = STRANGER_CONFIGURATION
            self.network_middleware = STRANGER_CONFIGURATION
            if network_middleware:
                raise self.ConfigurationError(
                    "Cannot configure a stranger to use network middleware.")

        #
        # Learner
        #
        self.learn_on_same_thread = learn_on_same_thread
        self.abort_on_learning_error = abort_on_learning_error
        self.start_learning_now = start_learning_now
        self.save_metadata = save_metadata
        self.reload_metadata = reload_metadata

        self.__fleet_state = FleetStateTracker()
        known_nodes = known_nodes or set()
        if known_nodes:
            self.known_nodes._nodes.update(
                {node.checksum_public_address: node
                 for node in known_nodes})
            self.known_nodes.record_fleet_state()  # TODO: Does this call need to be here?

        #
        # Blockchain
        #
        self.poa = poa
        self.provider_uri = provider_uri or self.DEFAULT_PROVIDER_URI

        self.blockchain = NO_BLOCKCHAIN_CONNECTION
        self.accounts = NO_BLOCKCHAIN_CONNECTION
        self.token_agent = NO_BLOCKCHAIN_CONNECTION
        self.miner_agent = NO_BLOCKCHAIN_CONNECTION
        self.policy_agent = NO_BLOCKCHAIN_CONNECTION

        #
        # Development Mode
        #
        if dev_mode:

            # Ephemeral dev settings
            self.abort_on_learning_error = True
            self.save_metadata = False
            self.reload_metadata = False

            # Generate one-time alphanumeric development password
            alphabet = string.ascii_letters + string.digits
            password = ''.join(secrets.choice(alphabet) for _ in range(32))

            # Auto-initialize
            self.initialize(password=password,
                            import_registry=import_seed_registry)

    def __call__(self, *args, **kwargs):
        return self.produce(*args, **kwargs)

    @classmethod
    def generate(cls, password: str, no_registry: bool, *args,
                 **kwargs) -> 'UrsulaConfiguration':
        """Shortcut: Hook-up a new initial installation and write configuration file to the disk"""
        ursula_config = cls(dev_mode=False, is_me=True, *args, **kwargs)
        ursula_config.__write(password=password, no_registry=no_registry)
        return ursula_config

    def __write(self, password: str, no_registry: bool):
        _new_installation_path = self.initialize(password=password,
                                                 import_registry=no_registry)
        _configuration_filepath = self.to_configuration_file(
            filepath=self.config_file_location)

    def cleanup(self) -> None:
        if self.__dev_mode:
            self.__temp_dir.cleanup()

    @property
    def dev_mode(self):
        return self.__dev_mode

    @property
    def known_nodes(self):
        return self.__fleet_state

    def connect_to_blockchain(self, recompile_contracts: bool = False):
        if self.federated_only:
            raise NodeConfiguration.ConfigurationError(
                "Cannot connect to blockchain in federated mode")

        self.blockchain = Blockchain.connect(provider_uri=self.provider_uri,
                                             compile=recompile_contracts,
                                             poa=self.poa)

        self.accounts = self.blockchain.interface.w3.eth.accounts
        self.log.debug("Established connection to provider {}".format(
            self.blockchain.interface.provider_uri))

    def connect_to_contracts(self) -> None:
        """Initialize contract agency and set them on config"""
        self.token_agent = NucypherTokenAgent(blockchain=self.blockchain)
        self.miner_agent = MinerAgent(blockchain=self.blockchain)
        self.policy_agent = PolicyAgent(blockchain=self.blockchain)
        self.log.debug("Established connection to nucypher contracts")

    def read_known_nodes(self):
        known_nodes = self.node_storage.all(federated_only=self.federated_only)
        known_nodes = {
            node.checksum_public_address: node
            for node in known_nodes
        }
        self.known_nodes._nodes.update(known_nodes)
        self.known_nodes.record_fleet_state()
        return self.known_nodes

    def forget_nodes(self) -> None:
        self.node_storage.clear()
        message = "Removed all stored node node metadata and certificates"
        self.log.debug(message)

    def destroy(self, force: bool = False, logs: bool = True) -> None:

        # TODO: Further confirm this is a nucypher dir first! (in-depth measure)

        if logs is True or force:
            shutil.rmtree(USER_LOG_DIR, ignore_errors=True)
        try:
            shutil.rmtree(self.config_root, ignore_errors=force)
        except FileNotFoundError:
            raise FileNotFoundError("No such directory {}".format(
                self.config_root))

    def generate_parameters(self, **overrides) -> dict:
        merged_parameters = {
            **self.static_payload,
            **self.dynamic_payload,
            **overrides
        }
        non_init_params = ('config_root', 'poa', 'provider_uri')
        character_init_params = filter(lambda t: t[0] not in non_init_params,
                                       merged_parameters.items())
        return dict(character_init_params)

    def produce(self, **overrides):
        """Initialize a new character instance and return it."""
        merged_parameters = self.generate_parameters(**overrides)
        character = self._CHARACTER_CLASS(**merged_parameters)
        return character

    @staticmethod
    def _read_configuration_file(filepath: str) -> dict:
        try:
            with open(filepath, 'r') as file:
                raw_contents = file.read()
                payload = NodeConfiguration.__CONFIG_FILE_DESERIALIZER(
                    raw_contents)
        except FileNotFoundError as e:
            raise  # TODO: Do we need better exception handling here?
        return payload

    @classmethod
    def from_configuration_file(cls,
                                filepath: str = None,
                                **overrides) -> 'NodeConfiguration':
        """Initialize a NodeConfiguration from a JSON file."""

        from nucypher.config.storages import NodeStorage
        node_storage_subclasses = {
            storage._name: storage
            for storage in NodeStorage.__subclasses__()
        }

        if filepath is None:
            filepath = cls.DEFAULT_CONFIG_FILE_LOCATION

        # Read from disk
        payload = cls._read_configuration_file(filepath=filepath)

        # Initialize NodeStorage subclass from file (sub-configuration)
        storage_payload = payload['node_storage']
        storage_type = storage_payload[NodeStorage._TYPE_LABEL]
        storage_class = node_storage_subclasses[storage_type]
        node_storage = storage_class.from_payload(
            payload=storage_payload,
            # character_class=cls._CHARACTER_CLASS,  # TODO: Do not pass this here - Always Use Ursula
            federated_only=payload['federated_only'],
            serializer=cls.NODE_SERIALIZER,
            deserializer=cls.NODE_DESERIALIZER)

        # Encode domains as UTF-8 bytestrings
        domains = list(domain.encode() for domain in payload['domains'])
        payload.update(dict(node_storage=node_storage, domains=domains))

        # Filter out Nones from overrides to detect, well, overrides
        overrides = {k: v for k, v in overrides.items() if v is not None}

        # Instantiate from merged params
        node_configuration = cls(**{**payload, **overrides})

        return node_configuration

    def to_configuration_file(self, filepath: str = None) -> str:
        """Write the static_payload to a JSON file."""
        if filepath is None:
            filename = '{}{}'.format(self._NAME.lower(),
                                     self.__CONFIG_FILE_EXT)
            filepath = os.path.join(self.config_root, filename)

        payload = self.static_payload
        del payload['is_me']  # TODO

        # Serialize domains
        domains = list(str(d) for d in self.domains)

        # Save node connection data
        payload.update(
            dict(node_storage=self.node_storage.payload(), domains=domains))

        with open(filepath, 'w') as config_file:
            config_file.write(json.dumps(payload, indent=4))
        return filepath

    def validate(self, config_root: str, no_registry=False) -> bool:
        # Top-level
        if not os.path.exists(config_root):
            raise self.ConfigurationError(
                'No configuration directory found at {}.'.format(config_root))

        # Sub-paths
        filepaths = self.runtime_filepaths
        if no_registry:
            del filepaths['registry_filepath']

        for field, path in filepaths.items():
            if not os.path.exists(path):
                message = 'Missing configuration file or directory: {}.'
                if 'registry' in path:
                    message += ' Did you mean to pass --federated-only?'
                raise NodeConfiguration.InvalidConfiguration(
                    message.format(path))
        return True

    @property
    def static_payload(self) -> dict:
        """Exported static configuration values for initializing Ursula"""
        payload = dict(
            config_root=self.config_root,

            # Identity
            is_me=self.is_me,
            federated_only=self.federated_only,
            checksum_public_address=self.checksum_public_address,
            keyring_dir=self.keyring_dir,

            # Behavior
            domains=self.domains,  # From Set
            learn_on_same_thread=self.learn_on_same_thread,
            abort_on_learning_error=self.abort_on_learning_error,
            start_learning_now=self.start_learning_now,
            save_metadata=self.save_metadata,
        )

        if not self.federated_only:
            payload.update(dict(provider_uri=self.provider_uri, poa=self.poa))

        return payload

    @property
    def dynamic_payload(self, **overrides) -> dict:
        """Exported dynamic configuration values for initializing Ursula"""

        if self.reload_metadata:
            known_nodes = self.node_storage.all(
                federated_only=self.federated_only)
            known_nodes = {
                node.checksum_public_address: node
                for node in known_nodes
            }
            self.known_nodes._nodes.update(known_nodes)
        self.known_nodes.record_fleet_state()

        payload = dict(network_middleware=self.network_middleware
                       or self.__DEFAULT_NETWORK_MIDDLEWARE_CLASS(),
                       known_nodes=self.known_nodes,
                       node_storage=self.node_storage,
                       crypto_power_ups=self.derive_node_power_ups() or None)

        if not self.federated_only:
            self.connect_to_blockchain(recompile_contracts=False)
            payload.update(blockchain=self.blockchain)

        if overrides:
            self.log.debug(
                "Overrides supplied to dynamic payload for {}".format(
                    self.__class__.__name__))
            payload.update(overrides)

        return payload

    @property
    def runtime_filepaths(self):
        filepaths = dict(config_root=self.config_root,
                         keyring_dir=self.keyring_dir,
                         registry_filepath=self.registry_filepath)
        return filepaths

    @classmethod
    def generate_runtime_filepaths(cls, config_root: str) -> dict:
        """Dynamically generate paths based on configuration root directory"""
        filepaths = dict(
            config_root=config_root,
            config_file_location=os.path.join(config_root,
                                              cls.CONFIG_FILENAME),
            keyring_dir=os.path.join(config_root, 'keyring'),
            registry_filepath=os.path.join(config_root,
                                           NodeConfiguration.__REGISTRY_NAME))
        return filepaths

    def _cache_runtime_filepaths(self) -> None:
        """Generate runtime filepaths and cache them on the config object"""
        filepaths = self.generate_runtime_filepaths(
            config_root=self.config_root)
        for field, filepath in filepaths.items():
            if getattr(self, field) is UNINITIALIZED_CONFIGURATION:
                setattr(self, field, filepath)

    def derive_node_power_ups(self) -> List[CryptoPowerUp]:
        power_ups = list()
        if self.is_me and not self.dev_mode:
            for power_class in self._CHARACTER_CLASS._default_crypto_powerups:
                power_up = self.keyring.derive_crypto_power(power_class)
                power_ups.append(power_up)
        return power_ups

    def initialize(
        self,
        password: str,
        import_registry: bool = True,
    ) -> str:
        """Initialize a new configuration."""

        #
        # Create Config Root
        #
        if self.__dev_mode:
            self.__temp_dir = TemporaryDirectory(
                prefix=self.TEMP_CONFIGURATION_DIR_PREFIX)
            self.config_root = self.__temp_dir.name
        else:
            try:
                os.mkdir(self.config_root, mode=0o755)

            except FileExistsError:
                if os.listdir(self.config_root):
                    message = "There are existing files located at {}".format(
                        self.config_root)
                    raise self.ConfigurationError(message)

            except FileNotFoundError:
                os.makedirs(self.config_root, mode=0o755)

        #
        # Create Config Subdirectories
        #
        self._cache_runtime_filepaths()
        try:

            # Node Storage
            self.node_storage.initialize()

            # Keyring
            if not self.dev_mode:
                os.mkdir(
                    self.keyring_dir,
                    mode=0o700)  # keyring TODO: Keyring backend entry point
                self.write_keyring(password=password)

            # Registry
            if import_registry and not self.federated_only:
                self.write_registry(
                    output_filepath=self.registry_filepath,  # type: str
                    source=self.__registry_source,  # type: str
                    blank=import_registry)  # type: bool

        except FileExistsError:
            existing_paths = [
                os.path.join(self.config_root, f)
                for f in os.listdir(self.config_root)
            ]
            message = "There are pre-existing files at {}: {}".format(
                self.config_root, existing_paths)
            self.log.critical(message)
            raise NodeConfiguration.ConfigurationError(message)

        if not self.__dev_mode:
            self.validate(config_root=self.config_root,
                          no_registry=import_registry or self.federated_only)

        # Success
        message = "Created nucypher installation files at {}".format(
            self.config_root)
        self.log.debug(message)
        return self.config_root

    def attach_keyring(self,
                       checksum_address: str = None,
                       *args,
                       **kwargs) -> None:
        if self.keyring is not NO_KEYRING_ATTACHED:
            if self.keyring.checksum_address != (checksum_address or
                                                 self.checksum_public_address):
                raise self.ConfigurationError(
                    "There is already a keyring attached to this configuration."
                )
            return

        if (checksum_address or self.checksum_public_address) is None:
            raise self.ConfigurationError(
                "No account specified to unlock keyring")

        self.keyring = NucypherKeyring(
            keyring_root=self.keyring_dir,  # type: str
            account=checksum_address
            or self.checksum_public_address,  # type: str
            *args,
            **kwargs)

    def write_keyring(self, password: str,
                      **generation_kwargs) -> NucypherKeyring:

        self.keyring = NucypherKeyring.generate(password=password,
                                                keyring_root=self.keyring_dir,
                                                **generation_kwargs)
        # Operating mode switch TODO: #466
        if self.federated_only:
            self.checksum_public_address = self.keyring.federated_address
        else:
            self.checksum_public_address = self.keyring.checksum_address

        return self.keyring

    def write_registry(self,
                       output_filepath: str = None,
                       source: str = None,
                       force: bool = False,
                       blank=False) -> str:

        if not force and os.path.isfile(output_filepath):
            raise self.ConfigurationError(
                'There is an existing file at the registry output_filepath {}'.
                format(output_filepath))

        output_filepath = output_filepath or self.registry_filepath
        source = source or self.REGISTRY_SOURCE

        if not blank and not self.dev_mode:
            # Validate Registry
            with open(source, 'r') as registry_file:
                try:
                    json.loads(registry_file.read())
                except JSONDecodeError:
                    message = "The registry source {} is not valid JSON".format(
                        source)
                    self.log.critical(message)
                    raise self.ConfigurationError(message)
                else:
                    self.log.debug(
                        "Source registry {} is valid JSON".format(source))

        else:
            self.log.warn("Writing blank registry")
            open(output_filepath, 'w').close()  # write blank

        self.log.debug(
            "Successfully wrote registry to {}".format(output_filepath))
        return output_filepath
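
The configuration round trip that to_configuration_file() and _read_configuration_file() implement is ordinary JSON serialization to a file under the config root. A minimal sketch with invented payload fields:

import json
import os
import tempfile

payload = {'rest_host': '127.0.0.1', 'rest_port': 9151, 'domains': ['example-domain']}

config_root = tempfile.mkdtemp(prefix='nucypher-tmp-')
filepath = os.path.join(config_root, 'node.config')

with open(filepath, 'w') as config_file:        # cf. to_configuration_file()
    config_file.write(json.dumps(payload, indent=4))

with open(filepath, 'r') as config_file:        # cf. _read_configuration_file()
    restored = json.loads(config_file.read())

assert restored == payload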
Esempio n. 33
0
class BaseProtocol(serialBytesProtocol):

    def __init__(self, shorthand=True, callback=None, escaped=False,
                 error_callback=None):

        serialBytesProtocol.__init__(self)
        if callback:
            self.callbacks = [callback]
        else:
            self.callbacks = []
        self.setRawMode()
        self.shorthand = shorthand
        self._escaped = escaped
        self.log = Logger()
        self.requests = {}
        self.command_id = 0
        self.buffer = None
#         self.reading = False

    def get_id(self):
        try:
            self.command_id += 1
            return intToByte(self.command_id)
        except ValueError:
            self.command_id = 1
            return intToByte(1)

    def connect(self, f):
        if f.callback:
            self.callbacks.append(f.callback)
        f.proto = self

    def rawDataReceived(self, data):
        for byte in data:
            if self.buffer:
                self.buffer.fill(byte)
                if self.buffer.remaining_bytes() == 0:
                    try:
                        # Try to parse and return result
                        self.buffer.parse()
                        # Ignore empty frames
                        if len(self.buffer.data) == 0:
                            self.buffer = None

                    except ValueError:
                        # Bad frame, so restart
                        self.log.warn('Bad frame: %r'
                                      % self.buffer.raw_data)

                    else:
                        self.read_frame(self.buffer.data)
                    self.buffer = None
#                     self.reading = False
            else:
                if byte == Frame.START_BYTE:
                    #                     self.reading == True
                    self.buffer = Frame(escaped=self._escaped)

    def read_frame(self, frame):
        """
        read_frame: binary data -> {'id':str,
                                         'param':binary data,
                                         ...}
        read_frame takes a data packet received from an XBee device
        and converts it into a dictionary. This dictionary provides
        names for each segment of binary data as specified in the
        api_responses spec.
        """
        # Fetch the first byte, identify the packet
        # If the spec doesn't exist, raise exception
        packet_id = frame[0:1]
        try:
            name = self.api_responses[packet_id]
        except AttributeError:
            raise NotImplementedError(
                "API response specifications could not be found; " +
                "use a derived class which defines 'api_responses'.")
        except KeyError:
            # Check to see if this ID can be found among transmittible packets
            for cmd_name, cmd in list(self.api_frames.items()):
                if cmd['id']['default'] == packet_id:
                    msg = "Incoming frame with id {packet_id} looks like a " +\
                        "command frame of type '{cmd_name}' (these should " +\
                        " not be received). Are you sure your devices " +\
                        "are in API mode?"
                    self.log.error(
                        msg, packet_id=bytes(frame), cmd_name=cmd_name)
                    return

            self.log.error("Unrecognized response packet with id byte {f}",
                           f=frame[0])
            return

        # Current byte index in the data stream
        packet = self.api_frames[name]
        index = 0
        callback = False

        # Result info
        info = {'id': name}
#         packet_spec = packet['structure']

        # Parse the packet in the order specified

        if 'frame_id' in packet:
            callback = True

        for field, dic in packet.items():
            if dic['len'] == 'null_terminated':
                field_data = b''

                # Consume one byte at a time until the NUL terminator
                # (slicing index:index + 1 keeps this a bytes comparison)
                while frame[index:index + 1] != b'\x00':
                    field_data += frame[index:index + 1]
                    index += 1

                # Step over the terminator itself
                index += 1
                info[field] = field_data
            elif dic['len'] is not None:
                # Store the number of bytes specified

                # Are we trying to read beyond the last data element?
                if index + dic['len'] > len(frame):
                    raise ValueError(
                        "Response packet was shorter than expected")

                field_data = frame[index:index + dic['len']]
                info[field] = field_data

                index += dic['len']
            # If the data field has no length specified, store any
            #  leftover bytes and quit
            else:
                field_data = frame[index:-1]

                # Were there any remaining bytes?
                if field_data:
                    # If so, store them
                    info[field] = field_data
                    index += len(field_data) + 1
                break

        # If there are more bytes than expected, raise an exception
        if index + 1 < len(frame):
            raise ValueError(
                "Response packet was longer than expected; " +
                "expected: %d, got: %d bytes" % (index, len(frame)))

        # Apply parsing rules if any exist
        if 'parsing' in packet:
            for parse_rule in packet['parsing']:
                # Only apply a rule if it is relevant (raw data is available)
                if parse_rule[0] in info:
                    # Apply the parse function to the indicated field and
                    # replace the raw data with the result
                    info[parse_rule[0]] = parse_rule[1](self, info)
        if expects_callback:
            if info['frame_id'] in self.requests:
                self.requests[info['frame_id']].callback(info)
                del self.requests[info['frame_id']]
            else:
                self.log.warn('Response without request: %r' % info)
        elif self.callbacks:
            for callback in self.callbacks:
                callback(info)
        else:
            self.log.debug("Unhandled frame: {info!r}", info=info)
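
    # Parsing sketch, assuming a subclass whose 'api_responses' maps the
    # id byte b'\x8a' to a frame named 'status' whose spec describes a
    # single one-byte 'status' field (names are illustrative, not defined
    # by this base class):
    #
    #     proto.read_frame(b'\x8a\x06')
    #     # -> info carries 'id' plus the parsed 'status' field; the spec
    #     # has no 'frame_id', so info is fanned out to self.callbacks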

    def _build_command(self, cmd, **kwargs):
        """
        _build_command: string (binary data) ... -> binary data
        _build_command will construct a command packet according to the
        specified command's specification in api_frames. It will expect
        named arguments for all fields other than those with a default
        value or a length of 'None'.
        Each field will be written out in the order they are defined
        in the command definition.
        """
        try:
            cmd_spec = self.api_frames[cmd]
        except AttributeError:
            raise NotImplementedError(
                "API command specifications could not be found; " +
                "use a derived class which defines 'api_frames'.")

        packet = b''

        if 'frame_id' in kwargs:
            fid = kwargs['frame_id']
        elif cmd in ['source_route']:
            # frame id 0 tells the XBee not to send a response frame
            fid = b'\x00'
        else:
            fid = self.get_id()
        for name, dic in cmd_spec.items():
            if name == 'frame_id':
                data = fid
            elif name in kwargs:
                data = kwargs[name]
            else:
                if dic['len']:
                    if dic['default']:
                        data = dic['default']
                    else:
                        raise KeyError(
                            "The expected field %s of length %d was "
                            "not provided" % (name, dic['len']))
                else:
                    data = None
            if dic['len'] and len(data) != dic['len']:
                raise ValueError(
                    "The data provided for '%s' was not %d bytes long"
                    % (name, dic['len']))
            if data:
                packet += data

        return packet, fid
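
    # Build sketch with a hypothetical spec entry (not part of this base
    # class):
    #
    #     api_frames['at'] = {
    #         'id':        {'len': 1,    'default': b'\x08'},
    #         'frame_id':  {'len': 1,    'default': None},
    #         'command':   {'len': 2,    'default': None},
    #         'parameter': {'len': None, 'default': None},
    #     }
    #
    # With that spec, _build_command('at', command=b'MY') returns
    # (b'\x08' + fid + b'MY', fid), where fid comes from get_id() and the
    # optional variable-length 'parameter' field is simply omitted.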

    def send(self, cmd, **kwargs):
        """
        send: string param=binary data ... -> Deferred
        When send is called with the proper arguments, an API command
        will be written to the serial port for this XBee device
        containing the proper instructions and data. The returned
        Deferred fires with the parsed response frame when one arrives
        with a matching frame id.
        This method must be called with named arguments in accordance
        with the api_frames specification. Arguments matching all
        field names other than those in reserved_names (like 'id' and
        'order') should be given, unless they are of variable length
        (a length of 'None' in the specification; those are optional).
        """
        # Pass through the keyword arguments
        packet, fid = self._build_command(cmd, **kwargs)
        d = defer.Deferred()
        self.requests.update({fid: d})
        f = Frame(packet).output()
        self.transport.write(f)
        return d
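
    # Usage sketch (assumes a subclass that defines an 'at' entry in
    # 'api_frames'):
    #
    #     d = proto.send('at', command=b'MY')
    #     d.addCallback(lambda info: print(info))
    #     # read_frame() fires the Deferred when a response carrying the
    #     # same frame_id comes back from the device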

    def _parse_samples_header(self, io_bytes):
        """
        _parse_samples_header: binary data in XBee IO data format ->
                        (int, [int ...], [int ...], int, int)
        _parse_samples_header will read the first four bytes of the
        binary data given and will return the number of samples which
        follow, a list of enabled digital inputs, a list of enabled
        analog inputs, the dio_mask, and the size of the header in bytes
        """
        header_size = 4

        # number of samples (always 1?) is the first byte
        sample_count = byteToInt(io_bytes[0])

        # part of byte 1 and byte 2 are the DIO mask ( 9 bits )
        dio_mask = (
            byteToInt(io_bytes[1]) << 8 | byteToInt(io_bytes[2])) & 0x01FF

        # upper 7 bits of byte 3 are the AIO mask; the parentheses matter,
        # since >> binds tighter than &
        aio_mask = (byteToInt(io_bytes[3]) & 0xFE) >> 1

        # sorted lists of enabled channels; value is position of bit in mask
        dio_chans = []
        aio_chans = []

        for i in range(0, 9):
            if dio_mask & (1 << i):
                dio_chans.append(i)

        dio_chans.sort()

        for i in range(0, 7):
            if aio_mask & (1 << i):
                aio_chans.append(i)

        aio_chans.sort()

        return (sample_count, dio_chans, aio_chans, dio_mask, header_size)
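
    # Worked header example (byte values are illustrative):
    #
    #     io_bytes = b'\x01\x00\x0c\x02' + sample_data
    #     # sample_count = 1
    #     # dio_mask = 0x000C              -> dio_chans == [2, 3]
    #     # aio_mask = (0x02 & 0xFE) >> 1  -> aio_chans == [0]
    #     # header_size = 4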

    def _parse_samples(self, io_bytes):
        """
        _parse_samples: binary data in XBee IO data format ->
                        [ {"dio-0":True,
                           "dio-1":False,
                           "adc-0":100"}, ...]
        _parse_samples reads binary data from an XBee device in the IO
        data format specified by the API. It will then return a
        dictionary indicating the status of each enabled IO port.
        """

        sample_count, dio_chans, aio_chans, dio_mask, header_size = \
            self._parse_samples_header(io_bytes)

        samples = []

        # split the sample data into a list, so it can be pop()'d
        sample_bytes = [byteToInt(c) for c in io_bytes[header_size:]]

        # repeat for every sample provided
        for _ in range(sample_count):
            tmp_samples = {}

            if dio_chans:
                # we have digital data
                digital_data_set = (
                    sample_bytes.pop(0) << 8 | sample_bytes.pop(0))
                digital_values = dio_mask & digital_data_set

                for i in dio_chans:
                    tmp_samples['dio-{0}'.format(i)] = bool(
                        (digital_values >> i) & 1)

            for i in aio_chans:
                analog_sample = (
                    sample_bytes.pop(0) << 8 | sample_bytes.pop(0))
                # Scale the 10-bit ADC reading to millivolts (1.2 V reference)
                tmp_samples['adc-{0}'.format(i)] = int(
                    (analog_sample * 1200.0) / 1023.0)

            samples.append(tmp_samples)

        return samples
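
    # Continuing the header example above: with dio_chans == [2, 3] and
    # aio_chans == [0], four sample bytes decode like this:
    #
    #     sample_data = b'\x00\x08\x02\x00'
    #     # digital_data_set = 0x0008 -> {'dio-2': False, 'dio-3': True}
    #     # analog_sample = 0x0200 = 512 -> int(512 * 1200.0 / 1023.0) == 600
    #     # samples == [{'dio-2': False, 'dio-3': True, 'adc-0': 600}]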

    def _parse_sensor_data(self, io_bytes):
        # TODO
        return [{'data': io_bytes}]

    def __getattr__(self, name):
        """
        If a method by the name of a valid api command is called,
        the arguments will be automatically sent to an appropriate
        send() call
        """

        # If api_frames is not defined on the instance, the attribute
        # lookup below re-enters __getattr__ with name == 'api_frames';
        # catch that case and raise NotImplementedError instead of
        # recursing forever.
        if name == 'api_frames':
            raise NotImplementedError(
                "API command specifications could not be found; use a " +
                "derived class which defines 'api_frames'.")

        # Is shorthand enabled, and is the called name a command?
        if self.shorthand and name in self.api_frames:
            # If so, simply return a function which passes its arguments
            # to an appropriate send() call
            return lambda **kwargs: self.send(name, **kwargs)
        else:
            raise AttributeError("XBee has no attribute '%s'" % name)
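
    # Shorthand sketch (assuming shorthand is enabled and the subclass
    # defines an 'at' entry in 'api_frames'): these two calls are
    # equivalent,
    #
    #     proto.at(command=b'ID')
    #     proto.send('at', command=b'ID')
    #
    # because __getattr__ turns the unknown attribute 'at' into a lambda
    # forwarding its keyword arguments to send().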