Example #1
File: utils.py Project: haf/cheesepi
def start_control_server():
	import argparse

	from twisted.internet import reactor
	from twisted.logger import Logger, globalLogPublisher, STDLibLogObserver

	from cheesepi.server.control import (CheeseRPCServerFactory,
	                                     CheeseRPCServer)
	from cheesepi.server.storage.mongo import MongoDAO

	# Argument parsing
	parser = argparse.ArgumentParser()
	parser.add_argument('--port', type=int, default=18080,
	                    help='Port to listen on')
	args = parser.parse_args()

	init_logging()

	# Make twisted logging write to pythons logging module
	globalLogPublisher.addObserver(STDLibLogObserver(name="cheesepi.server.control"))

	# Use twisted logger when in twisted
	log = Logger()

	# Logging
	#log = Logger()
	#globalLogPublisher.addObserver(PrintingObserver())

	#dao = MongoDAO()
	dao = MongoDAO('localhost', 27017)
	control_server = CheeseRPCServer(dao).getStreamFactory(CheeseRPCServerFactory)

	reactor.listenTCP(args.port, control_server)
	log.info("Starting control server on port %d..." % args.port)
	reactor.run()
Example #2
File: i2c.py Project: bverdu/onDemand
from twisted.logger import Logger
from twisted.protocols.basic import LineOnlyReceiver


class i2cProtocol(LineOnlyReceiver):

    def __init__(self):
        self.log = Logger()
        self.__funcs = {}

    def connectionMade(self):
        self.log.debug('i2c connected')

    def lineReceived(self, line):
        line = line.strip()
        # The first nine characters carry the caller address; drop leading zeros.
        called = line[:9].lstrip('0')
        # The final character is the on/off flag ('0' or '1').
        onoff = bool(int(line[-1]))
        try:
            call = self.__funcs[called]
        except KeyError:
            return
        else:
            call(onoff)

    def send_on(self):
        self.transport.write(self.factory.on_msg)

    def send_off(self):
        self.transport.write(self.factory.off_msg)

    def addCallback(self, name, func):
        self.__funcs[name] = func

    def remCallback(self, name):
        try:
            del self.__funcs[name]
        except KeyError:
            return
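
A hypothetical driving sketch for i2cProtocol (the address and callback are invented; it assumes the str-typed lines this Python 2 style protocol expects):

proto = i2cProtocol()
# The key is the nine-digit address with leading zeros stripped.
proto.addCallback('16234266', lambda on: print('on' if on else 'off'))
proto.lineReceived('0162342661')  # address '016234266' -> '16234266', flag '1' -> on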
Example #3
File: utils.py Project: haf/cheesepi
def start_upload_server():
	import argparse

	from twisted.internet import reactor
	from twisted.logger import Logger, globalLogPublisher, STDLibLogObserver
	from twisted.web.server import Site
	from twisted.web.resource import Resource

	from cheesepi.server.upload import UploadHandler

	# Argument parsing
	parser = argparse.ArgumentParser()
	parser.add_argument('--port', type=int, default=18090,
	                    help='Port to listen on')
	args = parser.parse_args()

	init_logging()

	# Make twisted logging write to pythons logging module
	globalLogPublisher.addObserver(STDLibLogObserver(name="cheesepi.server.upload"))

	# Use twisted logger when in twisted
	log = Logger()

	root = Resource()
	root.putChild("upload", UploadHandler())
	upload_server = Site(root)

	reactor.listenTCP(args.port, upload_server)
	log.info("Starting upload server on port %d..." % args.port)
	reactor.run()
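
Both cheesepi servers rely on the same bridge from twisted.logger into the stdlib logging module; a minimal standalone sketch of that pattern (observer name chosen for illustration):

import logging

from twisted.logger import Logger, globalLogPublisher, STDLibLogObserver

logging.basicConfig(level=logging.INFO)
# Forward every twisted.logger event to the stdlib logging module.
globalLogPublisher.addObserver(STDLibLogObserver(name="demo"))
Logger(namespace="demo").info("hello via stdlib logging")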
Example #4
class FanOperatingMode(Service):
    '''
    classdocs
    '''
    version = (1, 0)
    serviceType = 'urn:schemas-upnp-org:service:HVAC_FanOperatingMode:1'
    serviceId = 'urn:schemas-upnp-org:serviceId:HVAC_FanOperatingMode'
    serviceUrl = 'fanmode'
    type = 'FanOperating'
    subscription_timeout_range = (None, None)

    def __init__(self, xmlfile, client, name='Application', system=False):
        '''
        Constructor
        '''
        super(FanOperatingMode, self).__init__(
            self.type, self.serviceType, xml=xmlfile,
            client=client, appname=name)
        self.log = Logger()
        self.client = client
        self.client.UPNP_fan_event = self.upnp_event
        self.mode = 'ContinuousOn'
        self.fanstatus = 'Off'
        self.name = name

    def upnp_event(self, evt, var):
        self.log.debug('fan event: %s  ==> %s' % (var, evt))
        setattr(self, var, evt)
Example #5
class TemperatureSensor(Service):
    '''
    classdocs
    '''
    version = (1, 0)
    serviceType = 'urn:schemas-upnp-org:service:TemperatureSensor:1'
    serviceId = 'urn:schemas-upnp-org:serviceId:TemperatureSensor'
    serviceUrl = 'temp'
    type = 'Temperature'
    subscription_timeout_range = (None, None)

    def __init__(self, xmlfile, client, name='Application', system=False):
        '''
        Constructor
        '''
        super(TemperatureSensor, self).__init__(
            self.type, self.serviceType, xml=xmlfile,
            client=client, appname=name)
        self.log = Logger()
        self.client = client
        if system:
            self.application = 'Outdoor'
        else:
            self.application = 'Room'
        self.client.UPNP_Temp_event = self.upnp_event
        self.currenttemperature = 2000
        self.name = name

    def upnp_event(self, evt, var):
        self.log.debug('temp event: %s  ==> %s' % (var, evt))
        setattr(self, var, evt)
Example #6
class Volume(Service):
    version = (1, 0)
    serviceType = "urn:av-openhome-org:service:Volume:1"
    serviceId = "urn:av-openhome-org:serviceId:Volume"
    serviceUrl = "Volume"
    type = 'Volume'
    subscription_timeout_range = (None, None)

    def __init__(self, xmlfile, client, name='Application'):
        super(Volume, self).__init__(
            self.type, self.serviceType, xml=xmlfile,
            client=client, appname=name)
        self.log = Logger()
        self.client = client
        self.client.oh_eventVOLUME = self.upnp_event
        self.volumemax = self.client.max_volume
        self.volumeunity = 3
        self.volume = self.volumemax
        self.volumesteps = self.volumemax
        self.volumemillidbperstep = 600
        self.balancemax = 10
        self.balance = 0
        self.fademax = 10
        self.fade = 0
        self.mute = 0

    def upnp_event(self, evt, var):
        self.log.debug('volume event: %s  ==> %s' % (var, evt))
        setattr(self, var, evt)
Example #7
class HouseStatus(Service):
    '''
    classdocs
    '''
    version = (1, 0)
    serviceType = 'urn:schemas-upnp-org:service:HouseStatus:1'
    serviceId = 'urn:schemas-upnp-org:serviceId:HouseStatus'
    serviceUrl = 'house'
    type = 'House'
    subscription_timeout_range = (None, None)

    def __init__(self, xmlfile, client, name='Application'):
        '''
        Constructor
        '''
        super(HouseStatus, self).__init__(
            self.type, self.serviceType, xml=xmlfile,
            client=client, appname=name)
        self.log = Logger()
        self.client = client
        self.client.houses.append(self)
        self.occupancystate = 'Indeterminate'
        self.activitylevel = 'Regular'
        self.dormancylevel = 'Regular'

    def upnp_event(self, evt, var):
        self.log.debug('away event: %s  ==> %s' % (var, evt))
        setattr(self, var, evt)
Example #8
class Demo_light_factory(ReconnectingClientFactory, Client):

    def __init__(self, long_address=b'\x00\x00\x00\x00\x00\x00\xFF\xFF',
                 address=b'\xFF\xFE', pin=0,
                 api_level=1, net_type=None, stateless=True):
        self.long_address = long_address
        self.address = address
        self._pin = pin
        self.pin = 'dio-' + str(pin)  # sample key, e.g. 'dio-0'
        self.status = False
        self.proto = None
        self.log = Logger()
        self.callback = self.receive
        self.stateless = stateless

    '''
    Remote functions
    '''

    def r_set_target(self, value):

        if value is not self.status:
            if value is True:
                self.proto.remote_at(dest_addr_long=self.long_address,
                                     command=b'D%d' % self._pin,
                                     parameter=b'\x05')
            else:
                self.proto.remote_at(dest_addr_long=self.long_address,
                                     command=b'D%d' % self._pin,
                                     parameter=b'\x04')
                
            if self.stateless:
                self.status = value
                self.event(value, 'status')

    def r_get_target(self):
        return self.status

    def r_get_status(self):
        return self.status

    def set_status(self, status):
        if status is not self.status:
            self.log.debug('%r --> %s' % (self.long_address,
                                          'jour!' if status else 'nuit!'))
            self.status = status
            self.event(status, 'status')

    def receive(self, data):
        if 'samples' in data:
            for sample in data['samples']:
                if self.pin in sample:
                    self.set_status(sample[self.pin])
        elif 'parameter' in data:
            for sample in data['parameter']:
                if self.pin in sample:
                    self.set_status(sample[self.pin])
Example #9
class LoggingProcessProtocol(ProcessProtocol, object):
    """
    A ProcessProtocol that logs all output to a file
    """
    def __init__(self, commandname, maxbackups=3):
        log_name = commandname + ".log"
        log_dir = os.path.join(fs.adirs.user_log_dir, "processes")
        if not os.path.isdir(log_dir):
            os.makedirs(log_dir)
        log_name = os.path.join(log_dir, log_name)
        _backup_logs(log_name, maxbackups)
        self.log = Logger(observer=textFileLogObserver(io.open(log_name, "w")),
                          namespace="")
        super(LoggingProcessProtocol, self).__init__()

    def connectionMade(self):
        self.finished = defer.Deferred()

    def outReceived(self, data):
        self.log.info("{data}", data=bytes_to_str(data.strip()))

    def errReceived(self, data):
        self.log.error("{data}", data=bytes_to_str(data.strip()))

    def processEnded(self, reason):
        if reason.check(ProcessDone):
            self.finished.callback(True)
            self.log.info("Process finished without error")
        else:
            self.finished.errback(reason)
            self.log.error("Process ended with error: {reason!r}",
                           reason=reason)
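
A hypothetical usage sketch for the protocol above (command and path are placeholders; Twisted sets up `finished` when it calls connectionMade during spawnProcess):

from twisted.internet import reactor

proto = LoggingProcessProtocol("sleep")  # logs to <user_log_dir>/processes/sleep.log
reactor.spawnProcess(proto, "/bin/sleep", ["sleep", "1"])
proto.finished.addBoth(lambda result: reactor.stop())
reactor.run()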
Example #10
    def test_logger_namespace(self):
        """
        A `twisted.logger.Logger` with a namespace gets that namespace as a prefix.
        """
        fout = StringIO()
        log = Logger(namespace="ns", observer=FileLogObserver(fout, formatForSystemd))

        log.info("info\n{more}", more="info")
        log.error("err")

        self.assertEqual((
            "<6>[ns] info\n"
            "<6>  info\n"
            "<3>[ns] err\n"
        ), fout.getvalue())
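
formatForSystemd comes from the project under test; the same observer hook accepts any callable that maps an event dict to text, for example Twisted's own formatEvent (a minimal sketch):

from io import StringIO

from twisted.logger import FileLogObserver, Logger, formatEvent

fout = StringIO()
log = Logger(namespace="ns",
             observer=FileLogObserver(fout, lambda event: formatEvent(event) + "\n"))
log.info("info {more}", more="info")
print(fout.getvalue())  # "info info"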
Example #11
	def __init__(self, accessList):
		self.logger = Logger(observer=textFileLogObserver(sys.stdout))

		self.accessList = [nick.lower() for nick in accessList]

		if not os.path.exists(self.magicFile):
			self.logger.info("Creating magic file")

			try:
				with open(self.magicFile, "a"):
					pass

			except Exception as ex:
				self.logger.error("Unable to create magic file! {0}".format(ex.message))
				reactor.stop()

		self.markovGenerator = pymarkov.MarkovChainGenerator(self.magicFile)

		self.channels = []
		self.channelPhrasers = {}

		self.logger.debug("Discord initialized")

		# Maybe add hook/plugin system here?

		self.commands = Commands.Commands(self)		
Example #12
File: i2c.py Project: bverdu/onDemand
class Fake_HE_endpoint(object):
    bus = None
    clients = {}

    def __init__(self, reactor, bus_addr, addr, speed):
        self.random = False
        self.log = Logger()
        self.reactor = reactor
        self.bus_addr = bus_addr
        self.pair = addr
        self.speed = speed
        self.running = False

    def connect(self, clientFactory):
        proto = clientFactory.proto
        proto.transport = self
        if clientFactory.addr not in self.clients:
            self.clients.update({clientFactory.addr: proto})
        if not self.bus:
            r = task.LoopingCall(self.check)
            r.start(20)
        clientFactory.doStart()
        return defer.succeed(None)

    def check(self):
        if not self.running:
            for client in self.clients.values():
                client.connectionMade()
            self.running = True
            self.bus = True
        self.random = not self.random
        line1 = '162342660' + str(int(self.random))
        line2 = '334455660' + str(int(not self.random))
        if line1[:-1] in self.clients:
            self.clients[line1[:-1]].lineReceived(line1)
        if line2[:-1] in self.clients:
            self.clients[line2[:-1]].lineReceived(line2)

    def write(self, msg):
        t = []
        if len(msg) < 11:
            for n in msg:
                t.append(ord(n))
        else:
            raise Exception('too much data')
        self.log.debug('send %s to i2c link' % t)
Example #13
File: i2c.py Project: bverdu/onDemand
    def __init__(self, reactor, bus_addr, addr, speed):
        self.random = False
        self.log = Logger()
        self.reactor = reactor
        self.bus_addr = bus_addr
        self.pair = addr
        self.speed = speed
        self.running = False
Example #14
    def test_logger_namespace_failure(self):
        """
        An unexpected failure, logged as critical, is displayed across multiple
        lines.
        """
        fout = StringIO()
        log = Logger(namespace="ns", observer=FileLogObserver(fout, formatForSystemd))

        log.failure("Something went wrong", Failure(Exception("1\n2\n3")))

        self.assertEqual((
            "<2>[ns] Something went wrong\n"
            "<2>  Traceback (most recent call last):\n"
            "<2>  Failure: builtins.Exception: 1\n"
            "<2>  2\n"
            "<2>  3\n"
        ), fout.getvalue())
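
Logger.failure can also capture the active exception implicitly; a minimal sketch assuming nothing beyond twisted.logger:

from twisted.logger import Logger

log = Logger(namespace="demo")
try:
    1 / 0
except ZeroDivisionError:
    # With no explicit Failure argument, the active exception is captured.
    log.failure("Something went wrong")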
Example #15
File: mpd.py Project: bverdu/onDemand
    def __init__(self):
        """
        doc
        """
        self.log = Logger()
        self.delimiter = "\n"
        self.deferreds = []
        self.buff = {}
        self.idle = False
        self.list_index = 0
Example #16
def test_log_converter(handler, framework):
    pytest.importorskip("twisted.logger")
    # this checks that we can convert a plain Twisted Logger calling
    # failure() into a traceback on our observers.
    from twisted.logger import Logger
    from txaio.tx import _LogObserver

    out = six.StringIO()
    observer = _LogObserver(out)
    logger = Logger(observer=observer)

    try:
        raise RuntimeError("failed on purpose")
    except:
        logger.failure(None)

    output = out.getvalue()
    assert "failed on purpose" in output
    assert "Traceback" in output
Example #17
    def __init__(self, commandname, maxbackups=3):
        log_name = commandname + ".log"
        log_dir = os.path.join(fs.adirs.user_log_dir, "processes")
        if not os.path.isdir(log_dir):
            os.makedirs(log_dir)
        log_name = os.path.join(log_dir, log_name)
        _backup_logs(log_name, maxbackups)
        self.log = Logger(observer=textFileLogObserver(io.open(log_name, "w")),
                          namespace="")
        super(LoggingProcessProtocol, self).__init__()
Example #18
    def __init__(self, sid, callback, timeout):
        self.log = Logger()
        self.sid = sid
        self.callback_addr = callback
        self.timeout = timeout
        self.last_subscribe = time.time()
        self.next_notify_key = 0
        self.expired = False  # subscription has been flagged for deletion
        self.agent = Agent(reactor)
        self.pending_events = {}
        self.pending = False
Example #19
    def _relaying_test(self, eliot_logger, observer):
        """
        Publish an event using ``twisted.logger`` with ``observer`` hooked up and
        assert that the event ends up being seen by ``eliot_logger``.
        """
        twisted_logger = TwistedLogger(observer=observer)
        twisted_logger.info("Hello, world.")

        [event] = eliot_logger.messages
        self.assertThat(
            event,
            ContainsDict(dict(
                # A couple things from the Twisted side of the fence.
                log_namespace=Equals("lae_util.test.test_eliot"),
                log_format=Equals("Hello, world."),
                # And also some Eliot stuff.
                task_uuid=IsInstance(unicode),
                task_level=IsInstance(list),
            )),
        )
Example #20
    def __init__(self, long_address=b'\x00\x00\x00\x00\x00\x00\xFF\xFF',
                 address=b'\xFF\xFE', pin=0,
                 api_level=1, net_type=None):
        self.long_address = long_address
        self.address = address
        self._pin = pin
        self.pin = 'dio-' + str(pin)  # sample key, e.g. 'dio-0'
        self.status = False
        self.proto = None
        self.log = Logger()
        self.callback = self.receive
Example #21
    def __init__(self, event_fct=None):
        self.protocol = serialLineProtocol()
        self.uid = uuid.uuid4()
        self.protocol.factory = self
        self.log = Logger()
        self.first = True
        self.event = event_fct
        self.callback = None
        self.wait = False
        self.response = ''
        self.resp_re = re.compile(
            r'^OK|ERROR|(\+CM[ES] ERROR: \d+)|(COMMAND NOT SUPPORT)$')
Example #22
    def __init__(self, xmlfile, client, name='Application'):
        '''
        Constructor
        '''
        super(HouseStatus, self).__init__(
            self.type, self.serviceType, xml=xmlfile,
            client=client, appname=name)
        self.log = Logger()
        self.client = client
        self.client.houses.append(self)
        self.occupancystate = 'Indeterminate'
        self.activitylevel = 'Regular'
        self.dormancylevel = 'Regular'
Example #23
from twisted.logger import Logger
from twisted.protocols.basic import LineOnlyReceiver


class serialLineProtocol(LineOnlyReceiver):

    def __init__(self):
        self.log = Logger()
        self.__callbacks = {}

    def connectionMade(self):
        self.log.debug('serial connected')

    def lineReceived(self, line):
        for name in self.__callbacks:
            self.__callbacks[name](line)

    def send(self, data):
        self.transport.write(data)

    def addCallback(self, name, func):
        self.__callbacks.update({name: func})

    def remCallback(self, name):
        if name in self.__callbacks:
            del self.__callbacks[name]
Example #24
    def __init__(self, xmlfile, client, name='Application', system=False):
        '''
        Constructor
        '''
        super(FanOperatingMode, self).__init__(
            self.type, self.serviceType, xml=xmlfile,
            client=client, appname=name)
        self.log = Logger()
        self.client = client
        self.client.UPNP_fan_event = self.upnp_event
        self.mode = 'ContinuousOn'
        self.fanstatus = 'Off'
        self.name = name
Example #25
    def __init__(self, protocol, event_fct=None):
        '''
        Constructor
        '''
        self.log = Logger()
        self.first = True
        self.event = event_fct
        self.callback = None
        self.wait = False
        self.response = ''
        self.protocol = protocol
        self.protocol.addCallback(self.receive)
        self.resp_re = re.compile(
            r'^OK|ERROR|(\+CM[ES] ERROR: \d+)|(COMMAND NOT SUPPORT)$')
Example #26
File: base.py Project: bverdu/onDemand
    def __init__(self, shorthand=True, callback=None, escaped=False,
                 error_callback=None):

        serialBytesProtocol.__init__(self)
        if callback:
            self.callbacks = [callback]
        else:
            self.callbacks = []
        self.setRawMode()
        self.shorthand = shorthand
        self._escaped = escaped
        self.log = Logger()
        self.requests = {}
        self.command_id = 0
        self.buffer = None
Example #27
    def __init__(self, xmlfile, client, name='Application', system=False):
        '''
        Constructor
        '''
        super(TemperatureSensor, self).__init__(
            self.type, self.serviceType, xml=xmlfile,
            client=client, appname=name)
        self.log = Logger()
        self.client = client
        if system:
            self.application = 'Outdoor'
        else:
            self.application = 'Room'
        self.client.UPNP_Temp_event = self.upnp_event
        self.currenttemperature = 2000
        self.name = name
Example #28
	def __init__(self, configFileName):
		self.config = Config(self, configFileName)
		
		self.boundPorts = {}
		self.loadedModules = {}
		self._loadedModuleData = {}
		self._unloadingModules = {}
		self.commonModules = set()
		self.userCommands = {}
		self.serverCommands = {}
		self.channelModes = ({}, {}, {}, {})
		self.channelStatuses = {}
		self.channelStatusSymbols = {}
		self.channelStatusOrder = []
		self.channelModeTypes = {}
		self.userModes = ({}, {}, {}, {})
		self.userModeTypes = {}
		self.actions = {}
		self.storage = None
		self.storageSyncer = None
		self.dataCache = {}
		self.functionCache = {}
		
		self.serverID = None
		self.name = None
		self.isupport_tokens = {
			"CASEMAPPING": "strict-rfc1459",
			"CHANTYPES": "#",
		}
		self._uid = self._genUID()
		
		self.users = {}
		self.userNicks = CaseInsensitiveDictionary()
		self.channels = CaseInsensitiveDictionary(WeakValueDictionary)
		self.servers = {}
		self.serverNames = CaseInsensitiveDictionary()
		self.recentlyQuitUsers = {}
		self.recentlyQuitServers = {}
		self.recentlyDestroyedChannels = CaseInsensitiveDictionary()
		self.pruneRecentlyQuit = None
		self.pruneRecentChannels = None
		
		self._logFilter = LogLevelFilterPredicate()
		filterObserver = FilteringLogObserver(globalLogPublisher, (self._logFilter,))
		self.log = Logger("txircd", observer=filterObserver)
		
		self.startupTime = None
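
A minimal standalone sketch of the level-filtering pattern at the end of this constructor (default level and namespace chosen for illustration):

from twisted.logger import (FilteringLogObserver, Logger, LogLevel,
                            LogLevelFilterPredicate, globalLogPublisher)

predicate = LogLevelFilterPredicate(defaultLogLevel=LogLevel.info)
observer = FilteringLogObserver(globalLogPublisher, (predicate,))
log = Logger("txircd", observer=observer)
log.debug("dropped: below the default info level")
log.info("forwarded to the global publisher")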
Example #29
    def __init__(self, xmlfile, client, name='Application'):
        super(Volume, self).__init__(
            self.type, self.serviceType, xml=xmlfile,
            client=client, appname=name)
        self.log = Logger()
        self.client = client
        self.client.oh_eventVOLUME = self.upnp_event
        self.volumemax = self.client.max_volume
        self.volumeunity = 3
        self.volume = self.volumemax
        self.volumesteps = self.volumemax
        self.volumemillidbperstep = 600
        self.balancemax = 10
        self.balance = 0
        self.fademax = 10
        self.fade = 0
        self.mute = 0
Example #30
File: rest.py Project: bverdu/onDemand
    def __init__(
            self,
            host='https://developer-api.nest.com',
            token=None,
            event_handler=None,
            net_type='lan'):
        self.log = Logger()
        self.host = host
        self.token = token
        self.event_handler = event_handler
        self.pool = HTTPConnectionPool(reactor, persistent=True)
        self.loc = None
        self.reconnect = False
        self.fail_count = 0
        if event_handler:
            self.reconnect = True
            d = self.request(headers={'User-Agent': ['onDemand Rest Client'],
                                      'Accept': ['text/event-stream']})
            d.addCallback(self.on_disconnect)
Example #31
class NucypherKeyring:
    """
    Handles keys for a single identity, recognized by account.
    Warning: This class handles private keys!

    - keyring
        - .private
            - key.priv
            - key.priv.pem
        - public
            - key.pub
            - cert.pem

    """

    MINIMUM_PASSWORD_LENGTH = 16

    __default_keyring_root = os.path.join(DEFAULT_CONFIG_ROOT, 'keyring')
    _private_key_serializer = _PrivateKeySerializer()
    __DEFAULT_TLS_CURVE = ec.SECP384R1

    log = Logger("keys")

    class KeyringError(Exception):
        pass

    class KeyringLocked(KeyringError):
        pass

    class AuthenticationFailed(KeyringError):
        pass

    def __init__(
        self,
        account: str,
        keyring_root: str = None,
        root_key_path: str = None,
        pub_root_key_path: str = None,
        signing_key_path: str = None,
        pub_signing_key_path: str = None,
        delegating_key_path: str = None,
        tls_key_path: str = None,
        tls_certificate_path: str = None,
    ) -> None:
        """
        Generates a NucypherKeyring instance with the provided key paths,
        falling back to the default keyring paths.
        """

        # Identity
        self.__account = account
        self.__keyring_root = keyring_root or self.__default_keyring_root

        # Generate base filepaths
        __default_base_filepaths = self._generate_base_filepaths(
            keyring_root=self.__keyring_root)
        self.__public_key_dir = __default_base_filepaths['public_key_dir']
        self.__private_key_dir = __default_base_filepaths['private_key_dir']

        # Check for overrides
        __default_key_filepaths = self._generate_key_filepaths(
            account=self.__account,
            public_key_dir=self.__public_key_dir,
            private_key_dir=self.__private_key_dir)

        # Private
        self.__root_keypath = root_key_path or __default_key_filepaths['root']
        self.__signing_keypath = signing_key_path or __default_key_filepaths[
            'signing']
        self.__delegating_keypath = delegating_key_path or __default_key_filepaths[
            'delegating']
        self.__tls_keypath = tls_key_path or __default_key_filepaths['tls']

        # Public
        self.__root_pub_keypath = pub_root_key_path or __default_key_filepaths[
            'root_pub']
        self.__signing_pub_keypath = pub_signing_key_path or __default_key_filepaths[
            'signing_pub']
        self.__tls_certificate = tls_certificate_path or __default_key_filepaths[
            'tls_certificate']

        # Set Initial State
        self.__derived_key_material = KEYRING_LOCKED

    def __del__(self) -> None:
        self.lock()

    #
    # Public Keys
    #
    @property
    def checksum_address(self) -> str:
        return to_checksum_address(self.__account)

    @property
    def signing_public_key(self):
        signature_pubkey_bytes = _read_keyfile(
            keypath=self.__signing_pub_keypath, deserializer=None)
        signature_pubkey = UmbralPublicKey.from_bytes(signature_pubkey_bytes)
        return signature_pubkey

    @property
    def encrypting_public_key(self):
        encrypting_pubkey_bytes = _read_keyfile(
            keypath=self.__root_pub_keypath, deserializer=None)
        encrypting_pubkey = UmbralPublicKey.from_bytes(encrypting_pubkey_bytes)
        return encrypting_pubkey

    @property
    def certificate_filepath(self) -> str:
        return self.__tls_certificate

    @property
    def keyring_root(self) -> str:
        return self.__keyring_root

    #
    # Utils
    #
    @staticmethod
    def _generate_base_filepaths(keyring_root: str) -> Dict[str, str]:
        base_paths = dict(public_key_dir=os.path.join(keyring_root, 'public'),
                          private_key_dir=os.path.join(keyring_root,
                                                       'private'))
        return base_paths

    @staticmethod
    def _generate_key_filepaths(public_key_dir: str, private_key_dir: str,
                                account: str) -> dict:
        __key_filepaths = {
            'root':
            os.path.join(private_key_dir, 'root-{}.priv'.format(account)),
            'root_pub':
            os.path.join(public_key_dir, 'root-{}.pub'.format(account)),
            'signing':
            os.path.join(private_key_dir, 'signing-{}.priv'.format(account)),
            'delegating':
            os.path.join(private_key_dir,
                         'delegating-{}.priv'.format(account)),
            'signing_pub':
            os.path.join(public_key_dir, 'signing-{}.pub'.format(account)),
            'tls':
            os.path.join(private_key_dir, '{}.priv.pem'.format(account)),
            'tls_certificate':
            os.path.join(public_key_dir, '{}.pem'.format(account))
        }

        return __key_filepaths

    @unlock_required
    def __decrypt_keyfile(self, key_path: str) -> UmbralPrivateKey:
        """Returns plaintext version of decrypting key."""
        key_data = _read_keyfile(key_path,
                                 deserializer=self._private_key_serializer)
        wrap_key = _derive_wrapping_key_from_key_material(
            salt=key_data['wrap_salt'],
            key_material=self.__derived_key_material)
        plain_umbral_key = UmbralPrivateKey.from_bytes(
            key_bytes=key_data['key'], wrapping_key=wrap_key)
        return plain_umbral_key

    #
    # Public API
    #
    @property
    def account(self) -> str:
        return self.__account

    @property
    def is_unlocked(self) -> bool:
        return self.__derived_key_material is not KEYRING_LOCKED

    def lock(self) -> bool:
        """Make efforts to remove references to the cached key data"""
        self.__derived_key_material = KEYRING_LOCKED
        return self.is_unlocked

    def unlock(self, password: str) -> bool:
        if self.is_unlocked:
            return self.is_unlocked
        key_data = _read_keyfile(keypath=self.__root_keypath,
                                 deserializer=self._private_key_serializer)
        self.log.info("Unlocking keyring.")
        try:
            derived_key = derive_key_from_password(
                password=password.encode(), salt=key_data['master_salt'])
        except CryptoError:
            self.log.info("Keyring unlock failed.")
            raise self.AuthenticationFailed
        else:
            self.__derived_key_material = derived_key
            self.log.info("Finished unlocking.")
        return self.is_unlocked

    @unlock_required
    def derive_crypto_power(
        self, power_class: ClassVar
    ) -> Union[KeyPairBasedPower, DerivedKeyBasedPower]:
        """
        Takes either a SigningPower or a DecryptingPower and returns
        either a SigningPower or DecryptingPower with the coinciding
        private key.

        TODO: Derive a key from the root_key.
        """
        # Keypair-Based
        if issubclass(power_class, KeyPairBasedPower):

            codex = {
                SigningPower: self.__signing_keypath,
                DecryptingPower: self.__root_keypath,
                TLSHostingPower: self.__tls_keypath
            }

            # Create Power
            try:
                umbral_privkey = self.__decrypt_keyfile(codex[power_class])
                keypair = power_class._keypair_class(umbral_privkey)
                new_cryptopower = power_class(keypair=keypair)
            except KeyError:
                failure_message = "{} is an invalid type for deriving a CryptoPower".format(
                    power_class.__name__)
                raise TypeError(failure_message)

        # Derived
        elif issubclass(power_class, DerivedKeyBasedPower):
            key_data = _read_keyfile(self.__delegating_keypath,
                                     deserializer=self._private_key_serializer)
            wrap_key = _derive_wrapping_key_from_key_material(
                salt=key_data['wrap_salt'],
                key_material=self.__derived_key_material)
            keying_material = SecretBox(wrap_key).decrypt(key_data['key'])
            new_cryptopower = power_class(keying_material=keying_material)

        else:
            failure_message = "{} is an invalid type for deriving a CryptoPower.".format(
                power_class.__name__)
            raise ValueError(failure_message)

        return new_cryptopower

    #
    # Create
    #
    @classmethod
    def generate(
        cls,
        checksum_address: str,
        password: str,
        encrypting: bool = True,
        rest: bool = False,
        host: str = None,
        curve: EllipticCurve = None,
        keyring_root: str = None,
    ) -> 'NucypherKeyring':
        """
        Generates new encrypting, signing, and wallet keys encrypted with the password,
        saves the keyfiles to the local filesystem at their *default* paths,
        and returns the corresponding NucypherKeyring instance.
        """

        failures = cls.validate_password(password)
        if failures:
            raise cls.AuthenticationFailed(
                ", ".join(failures)
            )  # TODO: Ensure this scope is separable from the scope containing the password

        if not any((encrypting, rest)):
            raise ValueError(
                'Either "encrypting" or "rest" must be True to generate new keys.'
            )

        if curve is None:
            curve = cls.__DEFAULT_TLS_CURVE

        _base_filepaths = cls._generate_base_filepaths(
            keyring_root=keyring_root)
        _public_key_dir = _base_filepaths['public_key_dir']
        _private_key_dir = _base_filepaths['private_key_dir']

        #
        # Generate New Keypairs
        #

        keyring_args = dict()

        if checksum_address is not FEDERATED_ADDRESS:
            # Addresses read from some node keyrings (clients) are *not* returned in checksum format.
            checksum_address = to_checksum_address(checksum_address)

        if encrypting is True:
            signing_private_key, signing_public_key = _generate_signing_keys()

            if checksum_address is FEDERATED_ADDRESS:
                uncompressed_bytes = signing_public_key.to_bytes(
                    is_compressed=False)
                without_prefix = uncompressed_bytes[1:]
                verifying_key_as_eth_key = EthKeyAPI.PublicKey(without_prefix)
                checksum_address = verifying_key_as_eth_key.to_checksum_address()

        else:
            # TODO: Consider a "Repair" mode here
            # signing_private_key, signing_public_key = ...
            pass

        if not checksum_address:
            raise ValueError(
                "Checksum address must be provided for non-federated keyring generation"
            )

        __key_filepaths = cls._generate_key_filepaths(
            account=checksum_address,
            private_key_dir=_private_key_dir,
            public_key_dir=_public_key_dir)
        if encrypting is True:
            encrypting_private_key, encrypting_public_key = _generate_encryption_keys()
            delegating_keying_material = UmbralKeyingMaterial().to_bytes()

            # Derive Wrapping Keys
            password_salt, encrypting_salt, signing_salt, delegating_salt = (
                os.urandom(32) for _ in range(4))

            cls.log.info("About to derive key from password.")
            derived_key_material = derive_key_from_password(
                salt=password_salt, password=password.encode())
            encrypting_wrap_key = _derive_wrapping_key_from_key_material(
                salt=encrypting_salt, key_material=derived_key_material)
            signature_wrap_key = _derive_wrapping_key_from_key_material(
                salt=signing_salt, key_material=derived_key_material)
            delegating_wrap_key = _derive_wrapping_key_from_key_material(
                salt=delegating_salt, key_material=derived_key_material)

            # Encapsulate Private Keys
            encrypting_key_data = encrypting_private_key.to_bytes(
                wrapping_key=encrypting_wrap_key)
            signing_key_data = signing_private_key.to_bytes(
                wrapping_key=signature_wrap_key)
            delegating_key_data = bytes(
                SecretBox(delegating_wrap_key).encrypt(
                    delegating_keying_material))

            # Assemble Private Keys
            encrypting_key_metadata = _assemble_key_data(
                key_data=encrypting_key_data,
                master_salt=password_salt,
                wrap_salt=encrypting_salt)
            signing_key_metadata = _assemble_key_data(
                key_data=signing_key_data,
                master_salt=password_salt,
                wrap_salt=signing_salt)
            delegating_key_metadata = _assemble_key_data(
                key_data=delegating_key_data,
                master_salt=password_salt,
                wrap_salt=delegating_salt)

            #
            # Write Keys
            #

            # Create base paths if they do not exist.
            os.makedirs(abspath(keyring_root), exist_ok=True, mode=0o700)
            if not os.path.isdir(_public_key_dir):
                os.mkdir(_public_key_dir, mode=0o744)  # public dir
            if not os.path.isdir(_private_key_dir):
                os.mkdir(_private_key_dir, mode=0o700)  # private dir

            try:
                rootkey_path = _write_private_keyfile(
                    keypath=__key_filepaths['root'],
                    key_data=encrypting_key_metadata,
                    serializer=cls._private_key_serializer)

                sigkey_path = _write_private_keyfile(
                    keypath=__key_filepaths['signing'],
                    key_data=signing_key_metadata,
                    serializer=cls._private_key_serializer)

                delegating_key_path = _write_private_keyfile(
                    keypath=__key_filepaths['delegating'],
                    key_data=delegating_key_metadata,
                    serializer=cls._private_key_serializer)

                # Write Public Keys
                root_keypath = _write_public_keyfile(
                    __key_filepaths['root_pub'],
                    encrypting_public_key.to_bytes())
                signing_keypath = _write_public_keyfile(
                    __key_filepaths['signing_pub'],
                    signing_public_key.to_bytes())
            except (PrivateKeyExistsError, FileExistsError):
                raise ExistingKeyringError(
                    f"There is an existing keyring for address '{checksum_address}'"
                )

            # Commit
            keyring_args.update(
                keyring_root=keyring_root or cls.__default_keyring_root,
                root_key_path=rootkey_path,
                pub_root_key_path=root_keypath,
                signing_key_path=sigkey_path,
                pub_signing_key_path=signing_keypath,
                delegating_key_path=delegating_key_path,
            )

        if rest is True:
            if not all(
                (host, curve, checksum_address)
            ):  # TODO: Do we want to allow showing up with an old wallet and generating a new cert?  Probably.
                raise ValueError(
                    "host, checksum_address and curve are required to make a new keyring "
                    "TLS certificate. Got {}, {}, {}".format(host, curve, checksum_address))
            private_key, cert = _generate_tls_keys(
                host=host, checksum_address=checksum_address, curve=curve)

            def __serialize_pem(pk):
                return pk.private_bytes(
                    encoding=serialization.Encoding.PEM,
                    format=serialization.PrivateFormat.TraditionalOpenSSL,
                    encryption_algorithm=serialization.BestAvailableEncryption(
                        password=derived_key_material))

            tls_key_path = _write_private_keyfile(
                keypath=__key_filepaths['tls'],
                key_data=__serialize_pem(pk=private_key),
                serializer=None)
            certificate_filepath = _write_tls_certificate(
                full_filepath=__key_filepaths['tls_certificate'],
                certificate=cert)
            keyring_args.update(tls_certificate_path=certificate_filepath,
                                tls_key_path=tls_key_path)

        keyring_instance = cls(account=checksum_address, **keyring_args)
        return keyring_instance

    @classmethod
    def validate_password(cls, password: str) -> List:
        """
        Validate a password and return True or raise an error with a failure reason.

        NOTICE: Do not raise inside this function.
        """
        rules = (
            (bool(password), 'Password must not be blank.'),
            (len(password) >= cls.MINIMUM_PASSWORD_LENGTH,
             f'Password must be at least {cls.MINIMUM_PASSWORD_LENGTH} characters long.'
             ),
        )

        failures = list()
        for rule, failure_message in rules:
            if not rule:
                failures.append(failure_message)
        return failures

    def destroy(self):
        base_filepaths = self._generate_base_filepaths(
            keyring_root=self.__keyring_root)
        public_key_dir = base_filepaths['public_key_dir']
        private_key_dir = base_filepaths['private_key_dir']
        keypaths = self._generate_key_filepaths(
            account=self.checksum_address,
            public_key_dir=public_key_dir,
            private_key_dir=private_key_dir)

        # Remove the parsed paths from disk, whether they exist or not.
        for filepath in keypaths.values():
            with contextlib.suppress(FileNotFoundError):
                os.remove(filepath)
Example #32
from nucypher.network.teachers import TEACHER_NODES

NO_BLOCKCHAIN_CONNECTION.bool_value(False)

CHARACTER_DESTRUCTION = '''
Delete all {name} character files including:
    - Private and Public Keys ({keystore})
    - Known Nodes             ({nodestore})
    - Node Configuration File ({config})
    - Database                ({database})

Are you sure?'''

SUCCESSFUL_DESTRUCTION = "Successfully destroyed NuCypher configuration"

LOG = Logger('cli.actions')


class UnknownIPAddress(RuntimeError):
    pass


def get_password_from_prompt(prompt: str = "Enter password",
                             envvar: str = '',
                             confirm: bool = False) -> str:
    password = os.environ.get(envvar, NO_PASSWORD)
    if password is NO_PASSWORD:  # Collect password, prefer env var
        password = click.prompt(prompt,
                                confirmation_prompt=confirm,
                                hide_input=True)
    return password
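
A hypothetical call (the environment variable name is invented for illustration):

# Prefer the environment variable; fall back to an interactive prompt.
password = get_password_from_prompt(prompt="Enter keyring password",
                                    envvar="DEMO_KEYRING_PASSWORD",
                                    confirm=True)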
Example #33
import os

from twisted.cred import credentials, error
from twisted.internet import defer
from twisted.logger import Logger
from twisted.python import failure


class FilePasswordDB:
    """
    A file-based, text-based username/password database.

    Records in the datafile for this class are delimited by a particular
    string.  The username appears in a fixed field of the columns delimited
    by this string, as does the password.  Both fields are specifiable.  If
    the passwords are not stored plaintext, a hash function must be supplied
    to convert plaintext passwords to the form stored on disk and this
    CredentialsChecker will only be able to check IUsernamePassword
    credentials.  If the passwords are stored plaintext,
    IUsernameHashedPassword credentials will be checkable as well.
    """

    cache = False
    _credCache = None
    _cacheTimestamp = 0
    _log = Logger()

    def __init__(self,
                 filename,
                 delim=b':',
                 usernameField=0,
                 passwordField=1,
                 caseSensitive=True,
                 hash=None,
                 cache=False):
        """
        @type filename: C{str}
        @param filename: The name of the file from which to read username and
        password information.

        @type delim: C{str}
        @param delim: The field delimiter used in the file.

        @type usernameField: C{int}
        @param usernameField: The index of the username after splitting a
        line on the delimiter.

        @type passwordField: C{int}
        @param passwordField: The index of the password after splitting a
        line on the delimiter.

        @type caseSensitive: C{bool}
        @param caseSensitive: If true, consider the case of the username when
        performing a lookup.  Ignore it otherwise.

        @type hash: Three-argument callable or C{None}
        @param hash: A function used to transform the plaintext password
        received over the network to a format suitable for comparison
        against the version stored on disk.  The arguments to the callable
        are the username, the network-supplied password, and the in-file
        version of the password.  If the return value compares equal to the
        version stored on disk, the credentials are accepted.

        @type cache: C{bool}
        @param cache: If true, maintain an in-memory cache of the
        contents of the password file.  On lookups, the mtime of the
        file will be checked, and the file will only be re-parsed if
        the mtime is newer than when the cache was generated.
        """
        self.filename = filename
        self.delim = delim
        self.ufield = usernameField
        self.pfield = passwordField
        self.caseSensitive = caseSensitive
        self.hash = hash
        self.cache = cache

        if self.hash is None:
            # The passwords are stored plaintext.  We can support both
            # plaintext and hashed passwords received over the network.
            self.credentialInterfaces = (credentials.IUsernamePassword,
                                         credentials.IUsernameHashedPassword)
        else:
            # The passwords are hashed on disk.  We can support only
            # plaintext passwords received over the network.
            self.credentialInterfaces = (credentials.IUsernamePassword, )

    def __getstate__(self):
        d = dict(vars(self))
        for k in '_credCache', '_cacheTimestamp':
            try:
                del d[k]
            except KeyError:
                pass
        return d

    def _cbPasswordMatch(self, matched, username):
        if matched:
            return username
        else:
            return failure.Failure(error.UnauthorizedLogin())

    def _loadCredentials(self):
        """
        Loads the credentials from the configured file.

        @return: An iterable of C{username, password} couples.
        @rtype: C{iterable}

        @raise UnauthorizedLogin: when failing to read the credentials from the
            file.
        """
        try:
            with open(self.filename, "rb") as f:
                for line in f:
                    line = line.rstrip()
                    parts = line.split(self.delim)

                    if self.ufield >= len(parts) or self.pfield >= len(parts):
                        continue
                    if self.caseSensitive:
                        yield parts[self.ufield], parts[self.pfield]
                    else:
                        yield parts[self.ufield].lower(), parts[self.pfield]
        except IOError as e:
            self._log.error("Unable to load credentials db: {e!r}", e=e)
            raise error.UnauthorizedLogin()

    def getUser(self, username):
        if not self.caseSensitive:
            username = username.lower()

        if self.cache:
            if self._credCache is None or os.path.getmtime(
                    self.filename) > self._cacheTimestamp:
                self._cacheTimestamp = os.path.getmtime(self.filename)
                self._credCache = dict(self._loadCredentials())
            return username, self._credCache[username]
        else:
            for u, p in self._loadCredentials():
                if u == username:
                    return u, p
            raise KeyError(username)

    def requestAvatarId(self, c):
        try:
            u, p = self.getUser(c.username)
        except KeyError:
            return defer.fail(error.UnauthorizedLogin())
        else:
            up = credentials.IUsernamePassword(c, None)
            if self.hash:
                if up is not None:
                    h = self.hash(up.username, up.password, p)
                    if h == p:
                        return defer.succeed(u)
                return defer.fail(error.UnauthorizedLogin())
            else:
                return defer.maybeDeferred(c.checkPassword, p).addCallback(
                    self._cbPasswordMatch, u)
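
A minimal usage sketch (file name and credentials are hypothetical):

from twisted.cred.credentials import UsernamePassword

# passwords.txt contains lines such as b"alice:secret"
db = FilePasswordDB("passwords.txt", delim=b":")
d = db.requestAvatarId(UsernamePassword(b"alice", b"secret"))
d.addCallback(print)  # prints b'alice' on success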
Example #34
def ursula(click_config,
           action,
           dev,
           quiet,
           dry_run,
           force,
           lonely,
           network,
           teacher_uri,
           min_stake,
           rest_host,
           rest_port,
           db_filepath,
           checksum_address,
           withdraw_address,
           federated_only,
           poa,
           config_root,
           config_file,
           provider_uri,
           recompile_solidity,
           no_registry,
           registry_filepath,
           value,
           duration,
           index,
           list_
           ) -> None:
    """
    Manage and run an "Ursula" PRE node.

    \b
    Actions
    -------------------------------------------------
    \b
    init              Create a new Ursula node configuration.
    view              View the Ursula node's configuration.
    run               Run an "Ursula" node.
    save-metadata     Manually write node metadata to disk without running
    forget            Forget all known nodes.
    destroy           Delete Ursula node configuration.
    stake             Manage stakes for this node.
    confirm-activity  Manually confirm-activity for the current period.
    divide-stake      Divide an existing stake.
    collect-reward    Withdraw staking reward.

    """

    #
    # Boring Setup Stuff
    #
    if not quiet:
        log = Logger('ursula.cli')

    if click_config.debug and quiet:
        raise click.BadOptionUsage(option_name="quiet", message="--debug and --quiet cannot be used at the same time.")

    if not click_config.json_ipc and not click_config.quiet:
        click.secho(URSULA_BANNER.format(checksum_address or ''))

    #
    # Pre-Launch Warnings
    #
    if not click_config.quiet:
        if dev:
            click.secho("WARNING: Running in Development mode", fg='yellow')
        if force:
            click.secho("WARNING: Force is enabled", fg='yellow')

    #
    # Unauthenticated Configurations & Unconfigured Ursula Control
    #
    if action == "init":
        """Create a brand-new persistent Ursula"""

        if not network:
            raise click.BadArgumentUsage('--network is required to initialize a new configuration.')

        if dev:
            click_config.emitter(message="WARNING: Using temporary storage area", color='yellow')

        if not config_root:                         # Flag
            config_root = click_config.config_file  # Envvar

        if not rest_host:
            rest_host = click.prompt("Enter Ursula's public-facing IPv4 address")  # TODO: Remove this step

        ursula_config = UrsulaConfiguration.generate(password=click_config._get_password(confirm=True),
                                                     config_root=config_root,
                                                     rest_host=rest_host,
                                                     rest_port=rest_port,
                                                     db_filepath=db_filepath,
                                                     domains={network} if network else None,
                                                     federated_only=federated_only,
                                                     checksum_public_address=checksum_address,
                                                     no_registry=federated_only or no_registry,
                                                     registry_filepath=registry_filepath,
                                                     provider_uri=provider_uri,
                                                     poa=poa)

        painting.paint_new_installation_help(new_configuration=ursula_config, config_root=config_root, config_file=config_file)
        return

    #
    # Configured Ursulas
    #

    # Development Configuration
    if dev:
        ursula_config = UrsulaConfiguration(dev_mode=True,
                                            domains={TEMPORARY_DOMAIN},
                                            poa=poa,
                                            registry_filepath=registry_filepath,
                                            provider_uri=provider_uri,
                                            checksum_public_address=checksum_address,
                                            federated_only=federated_only,
                                            rest_host=rest_host,
                                            rest_port=rest_port,
                                            db_filepath=db_filepath)
    # Authenticated Configurations
    else:

        # Domains -> bytes | or default
        domains = [bytes(network, encoding='utf-8')] if network else None

        # Load Ursula from Configuration File
        ursula_config = UrsulaConfiguration.from_configuration_file(filepath=config_file,
                                                                    domains=domains,
                                                                    registry_filepath=registry_filepath,
                                                                    provider_uri=provider_uri,
                                                                    rest_host=rest_host,
                                                                    rest_port=rest_port,
                                                                    db_filepath=db_filepath,
                                                                    poa=poa)

        click_config.unlock_keyring(character_configuration=ursula_config)

    #
    # Connect to Blockchain (Non-Federated)
    #

    if not ursula_config.federated_only:
        click_config.connect_to_blockchain(character_configuration=ursula_config,
                                           recompile_contracts=recompile_solidity)

    click_config.ursula_config = ursula_config  # Pass Ursula's config onto staking sub-command

    #
    # Launch Warnings
    #

    if ursula_config.federated_only:
        click_config.emitter(message="WARNING: Running in Federated mode", color='yellow')

    # Seed - Step 1
    teacher_uris = [teacher_uri] if teacher_uri else list()
    teacher_nodes = actions.load_seednodes(teacher_uris=teacher_uris,
                                           min_stake=min_stake,
                                           federated_only=federated_only,
                                           network_middleware=click_config.middleware)

    # Produce - Step 2
    URSULA = ursula_config(known_nodes=teacher_nodes, lonely=lonely)

    #
    # Action Switch
    #

    if action == 'run':
        """Seed, Produce, Run!"""

        # GO!
        try:

            click_config.emitter(
                message="Starting Ursula on {}".format(URSULA.rest_interface),
                color='green',
                bold=True)

            # Ursula Deploy Warnings
            click_config.emitter(
                message="Connecting to {}".format(','.join(str(d, encoding='utf-8') for d in ursula_config.domains)),
                color='green',
                bold=True)

            if not URSULA.federated_only and URSULA.stakes:
                total = URSULA.blockchain.interface.w3.fromWei(URSULA.total_staked, 'ether')
                click_config.emitter(
                    message=f"Staking {total} NU ~ Keep Ursula Online!",
                    color='blue',
                    bold=True)

            if not click_config.debug:
                stdio.StandardIO(UrsulaCommandProtocol(ursula=URSULA))

            if dry_run:
                return  # <-- ABORT -X (Last Chance)

            # Run - Step 3
            node_deployer = URSULA.get_deployer()
            node_deployer.addServices()
            node_deployer.catalogServers(node_deployer.hendrix)
            node_deployer.run()   # <--- Blocking Call (Reactor)

        # Handle Crash
        except Exception as e:
            ursula_config.log.critical(str(e))
            click_config.emitter(
                message="{} {}".format(e.__class__.__name__, str(e)),
                color='red',
                bold=True)
            raise  # Crash :-(

        # Graceful Exit / Crash
        finally:
            click_config.emitter(message="Stopping Ursula", color='green')
            ursula_config.cleanup()
            click_config.emitter(message="Ursula Stopped", color='red')
        return

    elif action == "save-metadata":
        """Manually save a node self-metadata file"""
        metadata_path = URSULA.write_node_metadata(node=URSULA)
        return click_config.emitter(message="Successfully saved node metadata to {}.".format(metadata_path), color='green')

    elif action == "view":
        """Paint an existing configuration to the console"""
        response = UrsulaConfiguration._read_configuration_file(filepath=config_file or ursula_config.config_file_location)
        return click_config.emitter(response=response)

    elif action == "forget":
        actions.forget(configuration=ursula_config)
        return

    elif action == "destroy":
        """Delete all configuration files from the disk"""

        if dev:
            message = "'nucypher ursula destroy' cannot be used in --dev mode"
            raise click.BadOptionUsage(option_name='--dev', message=message)

        destroyed_filepath = destroy_system_configuration(config_class=UrsulaConfiguration,
                                                          config_file=config_file,
                                                          network=network,
                                                          config_root=ursula_config.config_file_location,
                                                          force=force)

        return click_config.emitter(message=f"Destroyed {destroyed_filepath}", color='green')

    elif action == 'stake':

        # List only
        if list_:
            live_stakes = list(URSULA.miner_agent.get_all_stakes(miner_address=URSULA.checksum_public_address))
            if not live_stakes:
                click.echo(f"There are no existing stakes for {URSULA.checksum_public_address}")
                return
            painting.paint_stakes(stakes=live_stakes)
            return

        # Confirm new stake init
        if not force:
            click.confirm("Stage a new stake?", abort=True)

        # Validate balance
        balance = URSULA.token_agent.get_balance(address=URSULA.checksum_public_address)
        if balance == 0:
            click.secho(f"{ursula.checksum_public_address} has 0 NU.")
            raise click.Abort
        if not quiet:
            click.echo(f"Current balance: {balance}")

        # Gather stake value
        if not value:
            min_stake_nu = int(URSULA.blockchain.interface.w3.fromWei(MIN_ALLOWED_LOCKED, 'ether'))
            value = click.prompt(f"Enter stake value in NU", type=click.INT, default=min_stake_nu)
        stake_wei = URSULA.blockchain.interface.w3.toWei(value, 'ether')
        stake_nu = value

        # Duration
        if not quiet:
            message = "Minimum duration: {} | Maximum Duration: {}".format(MIN_LOCKED_PERIODS, MAX_MINTING_PERIODS)
            click.echo(message)
        if not duration:
            duration = click.prompt("Enter stake duration in periods (1 Period = 24 Hours)", type=STAKE_DURATION)
        start_period = URSULA.miner_agent.get_current_period()
        end_period = start_period + duration

        # Review
        if not force:
            painting.paint_staged_stake(ursula=URSULA,
                                        stake_nu=stake_nu,
                                        stake_wei=stake_wei,
                                        duration=duration,
                                        start_period=start_period,
                                        end_period=end_period)

            if not dev:
                actions.confirm_staged_stake(ursula=URSULA, value=value, duration=duration)

        # Last chance to bail
        if not force:
            click.confirm("Publish staged stake to the blockchain?", abort=True)

        staking_transactions = URSULA.initialize_stake(amount=stake_wei, lock_periods=duration)
        painting.paint_staking_confirmation(ursula=URSULA, transactions=staking_transactions)
        return

    elif action == 'confirm-activity':
        stakes = list(URSULA.miner_agent.get_all_stakes(miner_address=URSULA.checksum_public_address))
        if len(stakes) == 0:
            click.secho("There are no active stakes for {}".format(URSULA.checksum_public_address))
            return
        URSULA.miner_agent.confirm_activity(node_address=URSULA.checksum_public_address)
        return

    elif action == 'divide-stake':
        """Divide an existing stake by specifying the new target value and end period"""

        stakes = list(URSULA.stakes)
        if len(stakes) == 0:
            click.secho("There are no active stakes for {}".format(URSULA.checksum_public_address))
            return

        if index is None:
            painting.paint_stakes(stakes=stakes)
            index = click.prompt("Select a stake to divide", type=click.IntRange(min=0, max=len(stakes)-1, clamp=False))
        stake_info = stakes[index]
        start_period, end_period, current_stake_wei = stake_info
        current_stake_nu = int(Web3.fromWei(current_stake_wei, 'ether'))

        # Value
        if not value:
            min_value = URSULA.blockchain.interface.w3.fromWei(MIN_ALLOWED_LOCKED, 'ether')
            target_value = click.prompt(f"Enter target value (must be less than {current_stake_nu} NU)",
                                        type=click.IntRange(min=min_value, max=current_stake_nu, clamp=False))
            target_value = Web3.toWei(target_value, 'ether')
        else:
            target_value = value

        # Duration
        if not duration:
            extension = click.prompt("Enter number of periods to extend", type=STAKE_EXTENSION)
        else:
            extension = duration

        if not force:
            new_end_period = end_period + extension
            duration = new_end_period - start_period

            division_message = f"""
{URSULA}
~ Original Stake: {painting.prettify_stake(stake_index=index, stake_info=stake_info)}
            """

            painting.paint_staged_stake(ursula=URSULA,
                                        stake_nu=int(Web3.fromWei(target_value, 'ether')),
                                        stake_wei=target_value,
                                        duration=duration,
                                        start_period=stakes[index][0],
                                        end_period=new_end_period,
                                        division_message=division_message)

            click.confirm("Is this correct?", abort=True)

        txhash_bytes = URSULA.divide_stake(stake_index=index,
                                           target_value=target_value,
                                           additional_periods=extension)

        if not quiet:
            click.secho('\nSuccessfully divided stake', fg='green')
            click.secho(f'Transaction Hash ........... {txhash_bytes.hex()}\n', fg='green')

        # Show the resulting stake list
        painting.paint_stakes(stakes=URSULA.stakes)

        return

    elif action == 'collect-reward':
        """Withdraw staking reward to the specified wallet address"""
        if not force:
            click.confirm(f"Send {URSULA.calculate_reward()} to {URSULA.checksum_public_address}?", abort=True)

        URSULA.collect_policy_reward(collector_address=withdraw_address or checksum_address)
        URSULA.collect_staking_reward()

    else:
        raise click.BadArgumentUsage("No such argument {}".format(action))
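The staking flow above moves between whole NU and its 18-decimal base unit (NuNits) by reusing web3's ether helpers, since NU and ether share 18 decimals. A minimal sketch of that round trip (web3 is assumed to be available; the amount is illustrative):

from web3 import Web3

value_nu = 15000                            # stake entered in whole NU
stake_wei = Web3.toWei(value_nu, 'ether')   # 15000 * 10**18 base units (NuNits)
assert Web3.fromWei(stake_wei, 'ether') == value_nu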
Example #35
0
class HendrixService(service.MultiService):
    """
    HendrixService is a MultiService subclass that facilitates the collection
    of services and the extension of resources on the website.
    'application' refers to a WSGI application object: likely a django.core.handlers.wsgi.WSGIHandler
    'resources' refers to a list of Resources with a namespace attribute
    'services' refers to a list of twisted Services to add to the collection.
    """

    log = Logger()

    def __init__(
            self,
            application,
            port=80,  # TODO: When will this ever be optional?  And why 80 by default?
            threadpool=None,
            resources=None,
            services=None,
            loud=False):
        service.MultiService.__init__(self)

        # Create, start and add a thread pool service, which is made available
        # to our WSGIResource within HendrixResource
        if not threadpool:
            self.threadpool = ThreadPool(name="HendrixService")
        else:
            self.threadpool = threadpool

        reactor.addSystemEventTrigger('after', 'shutdown',
                                      self.threadpool.stop)
        ThreadPoolService(self.threadpool).setServiceParent(self)

        # create the base resource and add any additional static resources
        resource = HendrixResource(reactor,
                                   self.threadpool,
                                   application,
                                   loud=loud)
        if resources:
            resources = sorted(resources, key=lambda r: r.namespace)
            for res in resources:
                if hasattr(res, 'get_resources'):
                    for sub_res in res.get_resources():
                        resource.putNamedChild(sub_res)
                else:
                    resource.putNamedChild(res)

        factory = server.Site(resource)
        # add a tcp server that binds to port=port
        main_web_tcp = TCPServer(port, factory)
        main_web_tcp.setName('main_web_tcp')
        # to get this at runtime use
        # hendrix_service.getServiceNamed('main_web_tcp')
        main_web_tcp.setServiceParent(self)

        # add any additional services
        if services:
            for srv_name, srv in services:
                srv.setName(srv_name)
                srv.setServiceParent(self)

    def get_port(self, name):
        "Return the port object associated to our tcp server"
        service = self.getServiceNamed(name)
        return service._port

    def add_server(self, name, protocol, server):
        # NOTE: assumes a `self.servers` mapping has been initialized elsewhere
        self.servers[(name, protocol)] = server
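A minimal usage sketch for HendrixService; the WSGI callable here is a hypothetical stand-in for a real application such as a Django WSGIHandler:

from twisted.internet import reactor

def application(environ, start_response):
    # Trivial WSGI app standing in for e.g. django.core.handlers.wsgi.WSGIHandler
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'hello from hendrix']

hendrix = HendrixService(application, port=8000)
hendrix.startService()  # starts the thread pool service and binds the TCP server
reactor.run()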
Example #36
0
class Felix(Character, NucypherTokenActor):
    """
    A NuCypher ERC20 faucet / Airdrop scheduler.

    Felix is a web application that distributes NuCypher *testnet* tokens to
    registered addresses, with a scheduled reduction of disbursement amounts,
    and exposes an HTTP endpoint for handling new address registration.

    The main goal of Felix is to provide a source of testnet tokens for
    research and the development of production-ready nucypher dApps.
    """

    _default_crypto_powerups = [SigningPower]

    TEMPLATE_NAME = 'felix.html'

    # Intervals
    DISTRIBUTION_INTERVAL = 60 * 60  # seconds (60*60=1Hr)
    DISBURSEMENT_INTERVAL = 24  # (24) hours
    STAGING_DELAY = 10  # seconds

    # Disbursement
    BATCH_SIZE = 10  # transactions
    MULTIPLIER = 0.95  # 5% reduction of the previous disbursement per round
    MINIMUM_DISBURSEMENT = 1e18  # NuNits
    ETHER_AIRDROP_AMOUNT = int(2e18)  # Wei

    # Node Discovery
    LEARNING_TIMEOUT = 30  # seconds
    _SHORT_LEARNING_DELAY = 60  # seconds
    _LONG_LEARNING_DELAY = 120  # seconds
    _ROUNDS_WITHOUT_NODES_AFTER_WHICH_TO_SLOW_DOWN = 1

    # Twisted
    _CLOCK = reactor
    _AIRDROP_QUEUE = dict()

    class NoDatabase(RuntimeError):
        pass

    def __init__(self,
                 db_filepath: str,
                 rest_host: str,
                 rest_port: int,
                 client_password: str = None,
                 crash_on_error: bool = False,
                 economics: TokenEconomics = None,
                 distribute_ether: bool = True,
                 *args,
                 **kwargs):

        # Character
        super().__init__(*args, **kwargs)
        self.log = Logger(f"felix-{self.checksum_address[-6::]}")

        # Network
        self.rest_port = rest_port
        self.rest_host = rest_host
        self.rest_app = NOT_RUNNING
        self.crash_on_error = crash_on_error

        # Database
        self.db_filepath = db_filepath
        self.db = NO_DATABASE_AVAILABLE
        self.db_engine = create_engine(f'sqlite:///{self.db_filepath}',
                                       convert_unicode=True)

        # Blockchain
        transacting_power = TransactingPower(blockchain=self.blockchain,
                                             password=client_password,
                                             account=self.checksum_address)
        self._crypto_power.consume_power_up(transacting_power)

        self.token_agent = NucypherTokenAgent(blockchain=self.blockchain)
        self.reserved_addresses = [
            self.checksum_address, BlockchainInterface.NULL_ADDRESS
        ]

        # Update reserved addresses with deployed contracts
        existing_entries = list(self.blockchain.registry.enrolled_addresses)
        self.reserved_addresses.extend(existing_entries)

        # Distribution
        self.__distributed = 0  # Track NU Output
        self.__airdrop = 0  # Track Batch
        self.__disbursement = 0  # Track Quantity
        self._distribution_task = LoopingCall(f=self.airdrop_tokens)
        self._distribution_task.clock = self._CLOCK
        self.start_time = NOT_RUNNING

        if not economics:
            economics = TokenEconomics()
        self.economics = economics

        self.MAXIMUM_DISBURSEMENT = economics.maximum_allowed_locked
        self.INITIAL_DISBURSEMENT = economics.minimum_allowed_locked

        # Optionally send ether with each token transaction
        self.distribute_ether = distribute_ether

        # Banner
        self.log.info(FELIX_BANNER.format(self.checksum_address))

    def __repr__(self):
        class_name = self.__class__.__name__
        r = f'{class_name}(checksum_address={self.checksum_address}, db_filepath={self.db_filepath})'
        return r

    def make_web_app(self):
        from flask import request
        from flask_sqlalchemy import SQLAlchemy

        # WSGI/Flask Service
        short_name = bytes(self.stamp).hex()[:6]
        self.rest_app = Flask(f"faucet-{short_name}",
                              template_folder=TEMPLATES_DIR)
        self.rest_app.config[
            'SQLALCHEMY_DATABASE_URI'] = f'sqlite:///{self.db_filepath}'
        try:
            self.rest_app.secret_key = sha256(
                os.environ['NUCYPHER_FELIX_DB_SECRET'].encode()).hexdigest()  # uses envvar
        except KeyError:
            raise OSError(
                "The 'NUCYPHER_FELIX_DB_SECRET' environment variable is not set. "
                "Export your application secret and try again.")

        # Database
        self.db = SQLAlchemy(self.rest_app)

        # Database Tables
        class Recipient(self.db.Model):
            """
            The one and only table in Felix's database; used to track recipients and airdrop metadata.
            """

            __tablename__ = 'recipient'

            id = self.db.Column(self.db.Integer, primary_key=True)
            address = self.db.Column(self.db.String,
                                     unique=True,
                                     nullable=False)
            joined = self.db.Column(self.db.DateTime,
                                    nullable=False,
                                    default=datetime.utcnow)
            total_received = self.db.Column(self.db.String,
                                            default='0',
                                            nullable=False)
            last_disbursement_amount = self.db.Column(self.db.String,
                                                      nullable=False,
                                                      default=0)
            last_disbursement_time = self.db.Column(self.db.DateTime,
                                                    nullable=True,
                                                    default=None)
            is_staking = self.db.Column(self.db.Boolean,
                                        nullable=False,
                                        default=False)

            def __repr__(self):
                return f'{self.__class__.__name__}(id={self.id})'

        self.Recipient = Recipient  # Bind to outer class

        # Flask decorators
        rest_app = self.rest_app
        limiter = Limiter(self.rest_app,
                          key_func=get_remote_address,
                          headers_enabled=True)

        #
        # REST Routes
        #

        @rest_app.route("/", methods=['GET'])
        @limiter.limit("100/day;20/hour;1/minute")
        def home():
            rendering = render_template(self.TEMPLATE_NAME)
            return rendering

        @rest_app.route("/register", methods=['POST'])
        @limiter.limit("5 per day")
        def register():
            """Handle new recipient registration via POST request."""
            try:
                new_address = request.form['address']
            except KeyError:
                return Response(status=400)  # TODO

            if not eth_utils.is_checksum_address(new_address):
                return Response(status=400)  # TODO

            if new_address in self.reserved_addresses:
                return Response(status=400)  # TODO

            try:
                with ThreadedSession(self.db_engine) as session:

                    existing = Recipient.query.filter_by(
                        address=new_address).all()
                    if existing:
                        # Address already exists; Abort
                        self.log.debug(f"{new_address} is already enrolled.")
                        return Response(status=400)

                    # Create the record
                    recipient = Recipient(address=new_address,
                                          joined=datetime.now())
                    session.add(recipient)
                    session.commit()

            except Exception as e:
                # Pass along exceptions to the logger
                self.log.critical(str(e))
                raise

            else:
                return Response(status=200)  # TODO

        return rest_app

    def create_tables(self) -> None:
        self.make_web_app()
        return self.db.create_all(app=self.rest_app)

    def start(self,
              host: str,
              port: int,
              web_services: bool = True,
              distribution: bool = True,
              crash_on_error: bool = False):

        self.crash_on_error = crash_on_error

        if self.start_time is not NOT_RUNNING:
            raise RuntimeError("Felix is already running.")

        self.start_time = maya.now()
        payload = {"wsgi": self.rest_app, "http_port": port}
        deployer = HendrixDeploy(action="start", options=payload)

        if distribution is True:
            self.start_distribution()

        if web_services is True:
            deployer.run()  # <-- Blocking call (Reactor)

    def start_distribution(self, now: bool = True) -> bool:
        """Start token distribution"""
        self.log.info(NU_BANNER)
        self.log.info("Starting NU Token Distribution | START")
        if self.token_balance == NU.ZERO():
            raise self.ActorError(
                f"Felix address {self.checksum_address} has 0 NU tokens.")
        self._distribution_task.start(interval=self.DISTRIBUTION_INTERVAL,
                                      now=now)
        return True

    def stop_distribution(self) -> bool:
        """Start token distribution"""
        self.log.info("Stopping NU Token Distribution | STOP")
        self._distribution_task.stop()
        return True

    def __calculate_disbursement(self, recipient) -> int:
        """Calculate the next reward for a recipient once the are selected for distribution"""

        # Initial Reward - sets the future rates
        if recipient.last_disbursement_time is None:
            amount = self.INITIAL_DISBURSEMENT

        # Cap reached; we'll continue to leak the minimum disbursement
        elif int(recipient.total_received) >= self.MAXIMUM_DISBURSEMENT:
            amount = self.MINIMUM_DISBURSEMENT

        # Calculate the next disbursement
        else:
            amount = math.ceil(
                int(recipient.last_disbursement_amount) * self.MULTIPLIER)
            if amount < self.MINIMUM_DISBURSEMENT:
                amount = self.MINIMUM_DISBURSEMENT

        return int(amount)
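    # Worked example of the schedule above (numbers illustrative): starting from
    # an initial disbursement of 100 units, successive rounds pay
    # ceil(100 * 0.95) = 95, ceil(95 * 0.95) = 91, ceil(91 * 0.95) = 87, ...
    # decaying geometrically until the amount reaches MINIMUM_DISBURSEMENT,
    # after which the faucet keeps leaking that minimum each round.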

    def __transfer(self, disbursement: int, recipient_address: str) -> str:
        """Perform a single token transfer transaction from one account to another."""

        self.__disbursement += 1
        receipt = self.token_agent.transfer(
            amount=disbursement,
            target_address=recipient_address,
            sender_address=self.checksum_address)
        txhash = receipt['transactionHash']
        if self.distribute_ether:
            ether = self.ETHER_AIRDROP_AMOUNT
            transaction = {
                'to': recipient_address,
                'from': self.checksum_address,
                'value': ether,
                'gasPrice': self.blockchain.client.gasPrice
            }
            ether_txhash = self.blockchain.client.send_transaction(transaction)

            self.log.info(
                f"Disbursement #{self.__disbursement} OK | NU {txhash.hex()[-6:]} | ETH {ether_txhash.hex()[:6]} "
                f"({str(NU(disbursement, 'NuNit'))} + {self.ETHER_AIRDROP_AMOUNT} wei) -> {recipient_address}"
            )

        else:
            self.log.info(
                f"Disbursement #{self.__disbursement} OK | {txhash.hex()[-6:]} |"
                f"({str(NU(disbursement, 'NuNit'))} -> {recipient_address}")

        return txhash

    def airdrop_tokens(self):
        """
        Calculate airdrop eligibility via faucet registration
        and transfer tokens to selected recipients.
        """

        with ThreadedSession(self.db_engine) as session:
            population = session.query(self.Recipient).count()

        message = f"{population} registered faucet recipients; " \
                  f"Distributed {str(NU(self.__distributed, 'NuNit'))} since {self.start_time.slang_time()}."
        self.log.debug(message)
        if population == 0:
            return  # Abort - no recipients are registered.

        # For filtration
        since = datetime.now() - timedelta(hours=self.DISBURSEMENT_INTERVAL)

        datetime_filter = or_(self.Recipient.last_disbursement_time <= since,
                              self.Recipient.last_disbursement_time ==
                              None)  # This must be `==` not `is`

        with ThreadedSession(self.db_engine) as session:
            candidates = session.query(
                self.Recipient).filter(datetime_filter).all()
            if not candidates:
                self.log.info("No eligible recipients this round.")
                return

        # Discard invalid addresses, in-depth
        invalid_addresses = list()

        def siphon_invalid_entries(candidate):
            address_is_valid = eth_utils.is_checksum_address(candidate.address)
            if not address_is_valid:
                invalid_addresses.append(candidate.address)
            return address_is_valid

        candidates = list(filter(siphon_invalid_entries, candidates))

        if invalid_addresses:
            self.log.info(
                f"{len(invalid_addresses)} invalid entries detected. Pruning database."
            )

            # TODO: Is this needed? - Invalid entries are rejected at the endpoint view.
            # Prune database of invalid records
            # with ThreadedSession(self.db_engine) as session:
            #     bad_eggs = session.query(self.Recipient).filter(self.Recipient.address in invalid_addresses).all()
            #     for egg in bad_eggs:
            #         session.delete(egg.id)
            #     session.commit()

        if not candidates:
            self.log.info("No eligible recipients this round.")
            return

        d = threads.deferToThread(self.__do_airdrop, candidates=candidates)
        self._AIRDROP_QUEUE[self.__airdrop] = d
        return d

    def __do_airdrop(self, candidates: list):

        self.log.info(f"Staging Airdrop #{self.__airdrop}.")

        # Staging
        staged_disbursements = [(r, self.__calculate_disbursement(recipient=r))
                                for r in candidates]
        batches = list(
            staged_disbursements[index:index + self.BATCH_SIZE]
            for index in range(0, len(staged_disbursements), self.BATCH_SIZE))
        total_batches = len(batches)

        self.log.info("====== Staged Airdrop ======")
        for recipient, disbursement in staged_disbursements:
            self.log.info(f"{recipient.address} ... {str(disbursement)[:-18]}")
        self.log.info("==========================")

        # Staging Delay
        self.log.info(
            f"Airdrop will commence in {self.STAGING_DELAY} seconds...")
        if self.STAGING_DELAY > 3:
            time.sleep(self.STAGING_DELAY - 3)
        for i in range(3):
            time.sleep(1)
            self.log.info(f"NU Token airdrop starting in {3 - i} seconds...")

        # Slowly, in series...
        for batch, staged_disbursement in enumerate(batches, start=1):
            self.log.info(f"======= Batch #{batch} ========")

            for recipient, disbursement in staged_disbursement:

                # Perform the transfer... leaky faucet.
                self.__transfer(disbursement=disbursement,
                                recipient_address=recipient.address)
                self.__distributed += disbursement

                # Update the database record
                recipient.last_disbursement_amount = str(disbursement)
                recipient.total_received = str(
                    int(recipient.total_received) + disbursement)
                recipient.last_disbursement_time = datetime.now()

                self.db.session.add(recipient)
                self.db.session.commit()

            # end inner loop
            self.log.info(
                f"Completed Airdrop #{self.__airdrop} Batch #{batch} of {total_batches}."
            )

        # end outer loop
        now = maya.now()
        next_interval_slang = now.add(
            seconds=self.DISTRIBUTION_INTERVAL).slang_time()
        self.log.info(
            f"Completed Airdrop #{self.__airdrop}; Next airdrop is {next_interval_slang}."
        )

        del self._AIRDROP_QUEUE[self.__airdrop]
        self.__airdrop += 1
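A rough usage sketch for Felix, assuming a funded faucet account; the keyword arguments beyond those shown in __init__ above (checksum_address in particular) are the usual Character arguments and are assumptions here:

felix = Felix(db_filepath='felix.sqlite',
              rest_host='0.0.0.0',
              rest_port=6151,
              client_password='<hypothetical-keystore-password>',
              checksum_address='<funded-faucet-address>')   # assumed Character kwarg
felix.create_tables()                    # builds the Flask app and the Recipient table
felix.start(host='0.0.0.0', port=6151)   # blocking: web service + distribution loop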
Example #37
0
class EthereumContractAgent:
    """
    Base class for Ethereum contract wrapper types that interact with blockchain contract instances.
    """

    registry_contract_name = NotImplemented
    _forward_address = True
    _proxy_name = None

    # TODO - #842: Gas Management
    DEFAULT_TRANSACTION_GAS_LIMITS = {}

    class ContractNotDeployed(Exception):
        pass

    def __init__(self,
                 registry: BaseContractRegistry,
                 provider_uri: str = None,
                 contract: Contract = None,
                 transaction_gas: int = None) -> None:

        self.log = Logger(self.__class__.__name__)

        self.registry = registry

        # NOTE: Entry-point for multi-provider support
        self.blockchain = BlockchainInterfaceFactory.get_or_create_interface(
            provider_uri=provider_uri)

        if contract is None:  # Fetch the contract
            contract = self.blockchain.get_contract_by_name(
                registry=self.registry,
                contract_name=self.registry_contract_name,
                proxy_name=self._proxy_name,
                use_proxy_address=self._forward_address)
        self.__contract = contract

        if not transaction_gas:
            transaction_gas = EthereumContractAgent.DEFAULT_TRANSACTION_GAS_LIMITS
        self.transaction_gas = transaction_gas

        super().__init__()
        self.log.info("Initialized new {} for {} with {} and {}".format(
            self.__class__.__name__, self.contract.address,
            self.blockchain.provider_uri, self.registry))

    def __repr__(self):
        class_name = self.__class__.__name__
        r = "{}(registry={}, contract={})"
        return r.format(class_name, self.registry, self.registry_contract_name)

    def __eq__(self, other):
        return bool(self.contract.address == other.contract.address)

    @property
    def contract(self):
        return self.__contract

    @property
    def contract_address(self):
        return self.__contract.address

    @property
    def contract_name(self) -> str:
        return self.registry_contract_name

    @property
    def owner(self):
        if not self._proxy_name:
            # Only upgradeable + ownable contracts can implement ownership transference.
            return None
        return self.contract.functions.owner().call()

    @validate_checksum_address
    def transfer_ownership(self,
                           sender_address: str,
                           checksum_address: str,
                           transaction_gas_limit: int = None) -> dict:
        contract_function = self.contract.functions.transferOwnership(
            checksum_address)
        receipt = self.blockchain.send_transaction(
            contract_function=contract_function,
            sender_address=sender_address,
            transaction_gas_limit=transaction_gas_limit)
        return receipt
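Concrete agents subclass EthereumContractAgent and pin registry_contract_name to a contract enrolled in the registry. A minimal hypothetical subclass (the contract name and its balanceOf function are assumptions here, following the standard ERC20 interface):

class TokenAgent(EthereumContractAgent):
    registry_contract_name = 'NuCypherToken'  # name as enrolled in the registry

    def get_balance(self, address: str) -> int:
        # Read-only call routed through the wrapped web3 Contract instance
        return self.contract.functions.balanceOf(address).call()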
Example #38
0
class SSHFactory(protocol.Factory):
    """
    A Factory for SSH servers.
    """

    _log = Logger()
    protocol = transport.SSHServerTransport

    services = {
        b"ssh-userauth": userauth.SSHUserAuthServer,
        b"ssh-connection": connection.SSHConnection,
    }

    def startFactory(self):
        """
        Check for public and private keys.
        """
        if not hasattr(self, "publicKeys"):
            self.publicKeys = self.getPublicKeys()
        if not hasattr(self, "privateKeys"):
            self.privateKeys = self.getPrivateKeys()
        if not self.publicKeys or not self.privateKeys:
            raise error.ConchError("no host keys, failing")
        if not hasattr(self, "primes"):
            self.primes = self.getPrimes()

    def buildProtocol(self, addr):
        """
        Create an instance of the server side of the SSH protocol.

        @type addr: L{twisted.internet.interfaces.IAddress} provider
        @param addr: The address at which the server will listen.

        @rtype: L{twisted.conch.ssh.transport.SSHServerTransport}
        @return: The built transport.
        """
        t = protocol.Factory.buildProtocol(self, addr)
        t.supportedPublicKeys = self.privateKeys.keys()
        if not self.primes:
            self._log.info(
                "disabling non-fixed-group key exchange algorithms "
                "because we cannot find moduli file"
            )
            t.supportedKeyExchanges = [
                kexAlgorithm
                for kexAlgorithm in t.supportedKeyExchanges
                if _kex.isFixedGroup(kexAlgorithm) or _kex.isEllipticCurve(kexAlgorithm)
            ]
        return t

    def getPublicKeys(self):
        """
        Called when the factory is started to get the public portions of the
        server's host keys.  Returns a dictionary mapping SSH key types to
        public key strings.

        @rtype: L{dict}
        """
        raise NotImplementedError("getPublicKeys unimplemented")

    def getPrivateKeys(self):
        """
        Called when the factory is started to get the private portions of the
        server's host keys.  Returns a dictionary mapping SSH key types to
        L{twisted.conch.ssh.keys.Key} objects.

        @rtype: L{dict}
        """
        raise NotImplementedError("getPrivateKeys unimplemented")

    def getPrimes(self):
        """
        Called when the factory is started to get Diffie-Hellman generators and
        primes to use.  Returns a dictionary mapping number of bits to lists
        of tuple of (generator, prime).

        @rtype: L{dict}
        """

    def getDHPrime(self, bits):
        """
        Return a tuple of (g, p) for a Diffie-Hellman process, with p being as
        close to C{bits} bits as possible.

        @type bits: L{int}
        @rtype:     L{tuple}
        """
        primesKeys = sorted(self.primes.keys(), key=lambda i: abs(i - bits))
        realBits = primesKeys[0]
        return random.choice(self.primes[realBits])

    def getService(self, transport, service):
        """
        Return a class to use as a service for the given transport.

        @type transport:    L{transport.SSHServerTransport}
        @type service:      L{bytes}
        @rtype:             subclass of L{service.SSHService}
        """
        if service == b"ssh-userauth" or hasattr(transport, "avatar"):
            return self.services[service]
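A minimal concrete factory only needs to supply the host keys. A sketch assuming an OpenSSH-format RSA host key pair on disk (paths illustrative; a real server would also wire up user authentication, e.g. via a Portal):

from twisted.conch.ssh import keys

class ExampleSSHFactory(SSHFactory):

    def getPublicKeys(self):
        return {b'ssh-rsa': keys.Key.fromFile('/etc/ssh/ssh_host_rsa_key.pub')}

    def getPrivateKeys(self):
        return {b'ssh-rsa': keys.Key.fromFile('/etc/ssh/ssh_host_rsa_key')}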
Example #39
0
GLOBAL_CFG = "/etc/mailmail"
LOCAL_CFG = os.path.expanduser("~/.twisted/mailmail")
SMARTHOST = '127.0.0.1'

ERROR_FMT = """\
Subject: Failed Message Delivery

  Message delivery failed.  The following occurred:

  %s
--
The Twisted sendmail application.
"""

_logObserver = textFileLogObserver(sys.stderr)
_log = Logger(observer=_logObserver)


class Options:
    """
    Store the values of the parsed command-line options to the I{mailmail}
    script.

    @type to: L{list} of L{str}
    @ivar to: The addresses to which to deliver this message.

    @type sender: L{str}
    @ivar sender: The address from which this message is being sent.

    @type body: C{file}
    @ivar body: The object from which the message is to be read.
Example #40
0
class Crawler(Learner):
    """
    Obtains blockchain information for the Monitor and outputs it to a DB.
    """

    _SHORT_LEARNING_DELAY = 2
    _LONG_LEARNING_DELAY = 30
    _ROUNDS_WITHOUT_NODES_AFTER_WHICH_TO_SLOW_DOWN = 25

    LEARNING_TIMEOUT = 10
    DEFAULT_REFRESH_RATE = 60  # seconds
    REFRESH_RATE_WINDOW = 0.25

    # InfluxDB Line Protocol Format (note the spaces, commas):
    # +-----------+--------+-+---------+-+---------+
    # |measurement|,tag_set| |field_set| |timestamp|
    # +-----------+--------+-+---------+-+---------+
    NODE_MEASUREMENT = 'crawler_node_info'
    NODE_LINE_PROTOCOL = '{measurement},staker_address={staker_address} ' \
                         'worker_address="{worker_address}",' \
                         'start_date={start_date},' \
                         'end_date={end_date},' \
                         'stake={stake},' \
                         'locked_stake={locked_stake},' \
                         'current_period={current_period}i,' \
                         'last_confirmed_period={last_confirmed_period}i ' \
                         '{timestamp}'
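    # For illustration, a rendered node line might look like (values invented):
    #   crawler_node_info,staker_address=0x... worker_address="0x...",start_date=1.6e9,end_date=1.7e9,stake=15000.0,locked_stake=15000.0,current_period=18000i,last_confirmed_period=18000i 1600000000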

    EVENT_MEASUREMENT = 'crawler_event_info'
    EVENT_LINE_PROTOCOL = '{measurement},txhash={txhash} ' \
                          'contract_name="{contract_name}",' \
                          'contract_address="{contract_address}",' \
                          'event_name="{event_name}",' \
                          'block_number={block_number}i,' \
                          'args="{args}" ' \
                          '{timestamp}'

    INFLUX_DB_NAME = 'network'
    INFLUX_RETENTION_POLICY_NAME = 'network_info_retention'

    # TODO: review defaults for retention policy
    RETENTION = '5w'  # Weeks
    REPLICATION = '1'

    METRICS_ENDPOINT = 'stats'
    DEFAULT_CRAWLER_HTTP_PORT = 9555

    ERROR_EVENTS = {
        StakingEscrowAgent: ['Slashed'],
        AdjudicatorAgent: ['IncorrectCFragVerdict'],
    }

    STAKER_PAGINATION_SIZE = 200

    def __init__(self,
                 influx_host: str,
                 influx_port: int,
                 crawler_http_port: int = DEFAULT_CRAWLER_HTTP_PORT,
                 registry: BaseContractRegistry = None,
                 node_storage_filepath: str = CrawlerNodeStorage.DEFAULT_DB_FILEPATH,
                 refresh_rate=DEFAULT_REFRESH_RATE,
                 restart_on_error=True,
                 *args,
                 **kwargs):

        # Settings
        self.federated_only = False  # Nope - for compatibility with Learner TODO # nucypher/466
        Teacher.set_federated_mode(False)

        self.registry = registry or InMemoryContractRegistry.from_latest_publication()
        self.economics = EconomicsFactory.get_economics(registry=self.registry)
        self._refresh_rate = refresh_rate
        self._restart_on_error = restart_on_error

        # TODO: Needs cleanup
        # Tracking
        node_storage = CrawlerNodeStorage(
            storage_filepath=node_storage_filepath)

        class MonitoringTracker(FleetSensor):
            def record_fleet_state(self, *args, **kwargs):
                new_state_or_none = super().record_fleet_state(*args, **kwargs)
                if new_state_or_none:
                    _, new_state = new_state_or_none
                    state = self.abridged_state_details(new_state)
                    node_storage.store_state_metadata(state)

        self.tracker_class = MonitoringTracker

        super().__init__(save_metadata=True,
                         node_storage=node_storage,
                         verify_node_bonding=False,
                         *args,
                         **kwargs)

        self.log = Logger(self.__class__.__name__)
        self.log.info(
            f"Storing node metadata in DB: {node_storage.db_filepath}")
        self.log.info(
            f"Storing blockchain metadata in DB: {influx_host}:{influx_port}")

        # In-memory Metrics
        self._stats = {'status': 'initializing'}
        self._crawler_client = None

        # Initialize InfluxDB
        self._db_host = influx_host
        self._db_port = influx_port
        self._influx_client = None

        # Agency
        self.staking_agent = ContractAgency.get_agent(StakingEscrowAgent,
                                                      registry=self.registry)

        # Crawler Tasks
        self.__collection_round = 0
        self.__collecting_nodes = False  # thread tracking
        self.__collecting_stats = False
        self.__events_from_block = 0  # from the beginning
        self.__collecting_events = False

        self._node_details_task = DelayedLoopingCall(
            f=self._learn_about_nodes,
            start_delay=random.randint(2, 15))  # random staggered start
        self._stats_collection_task = DelayedLoopingCall(
            f=self._collect_stats,
            threaded=True,
            start_delay=random.randint(2, 15))  # random staggered start
        self._events_collection_task = DelayedLoopingCall(
            f=self._collect_events,
            start_delay=random.randint(2, 15))  # random staggered start

        # JSON Endpoint
        self._crawler_http_port = crawler_http_port
        self._flask = None

    def _initialize_influx(self):
        try:
            db_list = self._influx_client.get_list_database()
        except requests.exceptions.ConnectionError:
            raise ConnectionError(
                f"No connection to InfluxDB at {self._db_host}:{self._db_port}"
            )
        found_db = list(filter(lambda db: db['name'] == self.INFLUX_DB_NAME, db_list))
        if len(found_db) == 0:
            # db not previously created
            self.log.info(
                f'Database {self.INFLUX_DB_NAME} not found, creating it')
            self._influx_client.create_database(self.INFLUX_DB_NAME)
            self._influx_client.create_retention_policy(
                name=self.INFLUX_RETENTION_POLICY_NAME,
                duration=self.RETENTION,
                replication=self.REPLICATION,
                database=self.INFLUX_DB_NAME,
                default=True)
        else:
            self.log.info(
                f'Database {self.INFLUX_DB_NAME} already exists, no need to create it'
            )

    def learn_from_teacher_node(self, *args, **kwargs):
        try:
            current_teacher = self.current_teacher_node(cycle=False)
        except self.NotEnoughTeachers as e:
            self.log.warn("Can't learn right now: {}".format(e.args[0]))
            return

        new_nodes = super().learn_from_teacher_node(*args, **kwargs)

        # update metadata of teacher - not just in memory but in the underlying storage system (db in this case)
        self.node_storage.store_node_metadata(current_teacher)
        self.node_storage.store_current_teacher(
            current_teacher.checksum_address)

        return new_nodes

    #
    # Measurements
    #

    @property
    def stats(self) -> dict:
        return self._stats

    @collector(label="Projected Stake and Stakers")
    def _measure_future_locked_tokens(self, periods: int = 365):
        period_range = range(1, periods + 1)
        token_counter = dict()
        for day in period_range:
            tokens, stakers = self.staking_agent.get_all_active_stakers(
                periods=day, pagination_size=self.STAKER_PAGINATION_SIZE)
            token_counter[day] = (float(NU.from_nunits(tokens).to_tokens()),
                                  len(stakers))
        return dict(token_counter)

    @collector(label="Top Stakes")
    def _measure_top_stakers(self) -> dict:
        _, stakers = self.staking_agent.get_all_active_stakers(
            periods=1, pagination_size=self.STAKER_PAGINATION_SIZE)
        data = dict(sorted(stakers.items(), key=lambda s: s[1], reverse=True))
        return data

    @collector(label="Staker Confirmation Status")
    def _measure_staker_activity(self) -> dict:
        confirmed, pending, inactive = self.staking_agent.partition_stakers_by_activity()
        stakers = dict()
        stakers['active'] = len(confirmed)
        stakers['pending'] = len(pending)
        stakers['inactive'] = len(inactive)
        return stakers

    @collector(label="Date/Time of Next Period")
    def _measure_start_of_next_period(self) -> str:
        """Returns iso8601 datetime of next period"""
        current_period = datetime_to_period(
            datetime=maya.now(),
            seconds_per_period=self.economics.seconds_per_period)
        next_period = datetime_at_period(
            period=current_period + 1,
            seconds_per_period=self.economics.seconds_per_period,
            start_of_period=True)

        return next_period.iso8601()
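    # Period arithmetic (a sketch of the helpers' behavior, assuming the usual
    # epoch-based implementation): datetime_to_period is effectively
    # epoch_seconds // seconds_per_period, and datetime_at_period with
    # start_of_period=True maps a period index back to its starting timestamp,
    # period * seconds_per_period.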

    @collector(label="Known Nodes")
    def measure_known_nodes(self):

        #
        # Setup
        #
        current_period = datetime_to_period(
            datetime=maya.now(),
            seconds_per_period=self.economics.seconds_per_period)
        buckets = {
            -1: ('green', 'Confirmed'),  # Confirmed Next Period
            0: ('#e0b32d', 'Pending'),  # Pending Confirmation of Next Period
            current_period: ('#525ae3', 'Idle'),  # Never confirmed
            NULL_ADDRESS:
            ('#d8d9da', 'Headless')  # Headless Staker (No Worker)
        }
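        # Bucket keys are values of (current_period - last_confirmed_period):
        # -1 means the node has already confirmed the *next* period, 0 means it
        # confirmed the current period but not the next, and current_period
        # means it has never confirmed at all (last_confirmed_period == 0).
        # NULL_ADDRESS is a sentinel key for stakers with no bonded worker.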

        shortest_uptime, newborn = float('inf'), None
        longest_uptime, uptime_king = 0, None

        uptime_template = '{days}d:{hours}h:{minutes}m'

        #
        # Scrape
        #

        payload = defaultdict(list)
        known_nodes = self._crawler_client.get_known_nodes_metadata()
        for staker_address in known_nodes:

            #
            # Confirmation Status Scraping
            #

            last_confirmed_period = self.staking_agent.get_last_committed_period(
                staker_address)
            missing_confirmations = current_period - last_confirmed_period
            worker = self.staking_agent.get_worker_from_staker(staker_address)
            if worker == NULL_ADDRESS:
                # missing_confirmations = NULL_ADDRESS
                continue  # TODO: Skip this DetachedWorker and do not display it
            try:
                color, status_message = buckets[missing_confirmations]
            except KeyError:
                color, status_message = 'red', 'Unconfirmed'
            node_status = {
                'status': status_message,
                'missed_confirmations': missing_confirmations,
                'color': color
            }

            #
            # Uptime Scraping
            #

            now = maya.now()
            timestamp = maya.MayaDT.from_iso8601(
                known_nodes[staker_address]['timestamp'])
            delta = now - timestamp

            node_qualifies_as_newborn = (delta.total_seconds() < shortest_uptime) and missing_confirmations == -1
            node_qualifies_for_uptime_king = (delta.total_seconds() > longest_uptime) and missing_confirmations == -1
            if node_qualifies_as_newborn:
                shortest_uptime, newborn = delta.total_seconds(), staker_address
            elif node_qualifies_for_uptime_king:
                longest_uptime, uptime_king = delta.total_seconds(), staker_address

            hours = delta.seconds // 3600
            minutes = delta.seconds % 3600 // 60
            natural_uptime = uptime_template.format(days=delta.days,
                                                    hours=hours,
                                                    minutes=minutes)

            #
            # Aggregate
            #

            known_nodes[staker_address]['status'] = node_status
            known_nodes[staker_address]['uptime'] = natural_uptime
            payload[status_message.lower()].append(known_nodes[staker_address])

        # There are not always winners...
        if newborn:
            known_nodes[newborn]['newborn'] = True
        if uptime_king:
            known_nodes[uptime_king]['uptime_king'] = True
        return payload

    def _collect_stats(self, threaded: bool = True) -> None:
        # TODO: Handle faulty connection to provider (requests.exceptions.ReadTimeout)
        if threaded:
            if self.__collecting_stats:
                self.log.debug(
                    "Skipping Round - Metrics collection thread is already running"
                )
                return
            return reactor.callInThread(self._collect_stats, threaded=False)
        self.__collection_round += 1
        self.__collecting_stats = True

        start = maya.now()
        click.secho(
            f"Scraping Round #{self.__collection_round} ========================",
            fg='blue')
        self.log.info("Collecting Statistics...")

        #
        # Read
        #

        # Time
        block = self.staking_agent.blockchain.client.w3.eth.getBlock('latest')
        block_number = block.number
        block_time = block.timestamp  # epoch
        current_period = datetime_to_period(
            datetime=maya.now(),
            seconds_per_period=self.economics.seconds_per_period)
        click.secho("✓ ... Current Period", color='blue')
        next_period = self._measure_start_of_next_period()

        # Nodes
        teacher = self._crawler_client.get_current_teacher_checksum()
        states = self._crawler_client.get_previous_states_metadata()

        known_nodes = self.measure_known_nodes()

        activity = self._measure_staker_activity()

        # Stake
        #future_locked_tokens = self._measure_future_locked_tokens()
        global_locked_tokens = self.staking_agent.get_global_locked_tokens()
        click.secho("✓ ... Global Network Locked Tokens", color='blue')

        top_stakers = self._measure_top_stakers()

        #
        # Write
        #

        self._stats = {
            'blocknumber': block_number,
            'blocktime': block_time,
            'current_period': current_period,
            'next_period': next_period,
            'prev_states': states,
            'current_teacher': teacher,
            'known_nodes': len(self.known_nodes),
            'activity': activity,
            'node_details': known_nodes,
            'global_locked_tokens': global_locked_tokens,
            #'future_locked_tokens': future_locked_tokens,
            'top_stakers': top_stakers,
        }
        done = maya.now()
        delta = done - start
        self.__collecting_stats = False
        click.secho(
            f"Scraping round completed (duration {delta}).",
            fg='yellow')  # TODO: Make optional, use emitter, or remove
        click.echo("==========================================")
        self.log.debug(f"Collected new metrics took {delta}.")

    @collector(label="Network Event Details")
    def _collect_events(self, threaded: bool = True):
        if threaded:
            if self.__collecting_events:
                self.log.debug(
                    "Skipping Round - Events collection thread is already running"
                )
                return
            return reactor.callInThread(self._collect_events, threaded=False)
        self.__collecting_events = True

        blockchain_client = self.staking_agent.blockchain.client
        latest_block_number = blockchain_client.block_number
        from_block = self.__events_from_block

        #block_time = latest_block.timestamp  # precision in seconds

        current_period = datetime_to_period(
            datetime=maya.now(),
            seconds_per_period=self.economics.seconds_per_period)

        events_list = list()
        for agent_class, event_names in self.ERROR_EVENTS.items():
            agent = ContractAgency.get_agent(agent_class,
                                             registry=self.registry)
            for event_name in event_names:
                events = [agent.contract.events[event_name]]
                for event in events:
                    entries = event.getLogs(fromBlock=from_block,
                                            toBlock=latest_block_number)
                    for event_record in entries:
                        record = EventRecord(event_record)
                        args = ", ".join(f"{k}:{v}"
                                         for k, v in record.args.items())
                        events_list.append(
                            self.EVENT_LINE_PROTOCOL.format(
                                measurement=self.EVENT_MEASUREMENT,
                                txhash=record.transaction_hash,
                                contract_name=agent.contract_name,
                                contract_address=agent.contract_address,
                                event_name=event_name,
                                block_number=record.block_number,
                                args=args,
                                timestamp=blockchain_client.w3.eth.getBlock(
                                    record.block_number).timestamp,
                            ))

        success = self._influx_client.write_points(
            events_list,
            database=self.INFLUX_DB_NAME,
            time_precision='s',
            batch_size=10000,
            protocol='line')
        self.__events_from_block = latest_block_number
        self.__collecting_events = False
        if not success:
            # TODO: What do we do here - Event hook for alerting?
            self.log.warn(
                f'Unable to write events to database {self.INFLUX_DB_NAME} '
                f'| Period {current_period} starting from block {from_block}')

    @collector(label="Known Node Details")
    def _learn_about_nodes(self, threaded: bool = True):
        if threaded:
            if self.__collecting_nodes:
                self.log.debug(
                    "Skipping Round - Nodes collection thread is already running"
                )
                return
            return reactor.callInThread(self._learn_about_nodes,
                                        threaded=False)
        self.__collecting_nodes = True

        agent = self.staking_agent
        known_nodes = list(self.known_nodes)

        block_time = agent.blockchain.client.get_blocktime()  # precision in seconds
        current_period = datetime_to_period(
            datetime=maya.now(),
            seconds_per_period=self.economics.seconds_per_period)

        log = f'Processing {len(known_nodes)} nodes at {MayaDT(epoch=block_time)} | Period {current_period}'
        self.log.info(log)

        data = list()
        for node in known_nodes:

            staker_address = node.checksum_address
            worker = agent.get_worker_from_staker(staker_address)

            stake = agent.owned_tokens(staker_address)
            staked_nu_tokens = float(NU.from_nunits(stake).to_tokens())
            locked_nu_tokens = float(
                NU.from_nunits(
                    agent.get_locked_tokens(
                        staker_address=staker_address)).to_tokens())

            economics = EconomicsFactory.get_economics(registry=self.registry)
            stakes = StakeList(checksum_address=staker_address,
                               registry=self.registry)
            stakes.refresh()

            if stakes.initial_period is NOT_STAKING:
                continue  # TODO: Skip this measurement for now

            start_date = datetime_at_period(
                stakes.initial_period,
                seconds_per_period=economics.seconds_per_period)
            start_date = start_date.datetime().timestamp()
            end_date = datetime_at_period(
                stakes.terminal_period,
                seconds_per_period=economics.seconds_per_period)
            end_date = end_date.datetime().timestamp()

            last_confirmed_period = agent.get_last_committed_period(
                staker_address)

            num_work_orders = 0  # len(node.work_orders())  # TODO: Only works for is_me with datastore attached

            # TODO: do we need to worry about how much information is in memory if number of nodes is
            #  large i.e. should I check for size of data and write within loop if too big
            data.append(
                self.NODE_LINE_PROTOCOL.format(
                    measurement=self.NODE_MEASUREMENT,
                    staker_address=staker_address,
                    worker_address=worker,
                    start_date=start_date,
                    end_date=end_date,
                    stake=staked_nu_tokens,
                    locked_stake=locked_nu_tokens,
                    current_period=current_period,
                    last_confirmed_period=last_confirmed_period,
                    timestamp=block_time,
                    work_orders=num_work_orders))

        success = self._influx_client.write_points(
            data,
            database=self.INFLUX_DB_NAME,
            time_precision='s',
            batch_size=10000,
            protocol='line')
        self.__collecting_nodes = False
        if not success:
            # TODO: What do we do here - Event hook for alerting?
            self.log.warn(
                f'Unable to write node information to database {self.INFLUX_DB_NAME} at '
                f'{MayaDT(epoch=block_time)} | Period {current_period}')

    def make_flask_server(self):
        """JSON Endpoint"""
        flask = Flask('nucypher-monitor')
        self._flask = flask
        self._flask.config["JSONIFY_PRETTYPRINT_REGULAR"] = True

        @flask.route('/stats', methods=['GET'])
        def stats():
            response = jsonify(self._stats)
            return response

    def _handle_errors(self, *args, **kwargs):
        failure = args[0]
        cleaned_traceback = failure.getTraceback().replace('{', '').replace(
            '}', '')
        if self._restart_on_error:
            self.log.warn(
                f'Unhandled error: {cleaned_traceback}. Attempting to restart crawler'
            )
            if not self._node_details_task.running:
                self.start()
        else:
            self.log.critical(f'Unhandled error: {cleaned_traceback}')

    def start(self, eager: bool = False):
        """Start the crawler if not already running"""
        if not self.is_running:
            self.log.info('Starting Crawler...')
            if self._influx_client is None:
                self._influx_client = InfluxDBClient(
                    host=self._db_host,
                    port=self._db_port,
                    database=self.INFLUX_DB_NAME)
                self._initialize_influx()

            if self._crawler_client is None:
                from monitor.db import CrawlerStorageClient
                self._crawler_client = CrawlerStorageClient()

                # TODO: Maybe?
                # from monitor.db import CrawlerInfluxClient
                # self.crawler_influx_client = CrawlerInfluxClient()

            # start tasks
            node_learner_deferred = self._node_details_task.start(
                interval=random.randint(
                    int(self._refresh_rate * (1 - self.REFRESH_RATE_WINDOW)),
                    self._refresh_rate),
                now=eager)
            collection_deferred = self._stats_collection_task.start(
                interval=random.randint(
                    self._refresh_rate,
                    int(self._refresh_rate * (1 + self.REFRESH_RATE_WINDOW))),
                now=eager)

            # get known last event block
            self.__events_from_block = self._get_last_known_blocknumber()
            events_deferred = self._events_collection_task.start(
                interval=self._refresh_rate, now=eager)

            # hookup error callbacks
            node_learner_deferred.addErrback(self._handle_errors)
            collection_deferred.addErrback(self._handle_errors)
            events_deferred.addErrback(self._handle_errors)

            # Start up
            self.start_learning_loop(now=False)
            self.make_flask_server()
            hx_deployer = HendrixDeploy(action="start",
                                        options={
                                            "wsgi": self._flask,
                                            "http_port":
                                            self._crawler_http_port
                                        })
            hx_deployer.run()  # <--- Blocking Call to Reactor

    def stop(self):
        """Stop the crawler if currently running"""
        if self.is_running:
            self.log.info('Stopping Monitor Crawler')

            # stop tasks
            self._node_details_task.stop()
            self._events_collection_task.stop()
            self._stats_collection_task.stop()

            if self._influx_client is not None:
                self._influx_client.close()
                self._influx_client = None

    @property
    def is_running(self):
        """Returns True if currently running, False otherwise"""
        return self._node_details_task.running

    def _get_last_known_blocknumber(self):
        last_known_blocknumber = 0
        blocknumber_result = list(self._influx_client.query(
            f'SELECT MAX(block_number) from {self.EVENT_MEASUREMENT}').get_points())
        if len(blocknumber_result) > 0:
            last_known_blocknumber = blocknumber_result[0]['max']

        return last_known_blocknumber
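
A minimal standalone sketch of the MAX() query pattern used by
_get_last_known_blocknumber above, assuming a reachable InfluxDB; the database
and measurement names here are placeholders for INFLUX_DB_NAME and
EVENT_MEASUREMENT:

from influxdb import InfluxDBClient

client = InfluxDBClient(host='localhost', port=8086, database='crawler')
points = list(client.query(
    'SELECT MAX(block_number) FROM crawler_events').get_points())
last_known_block = points[0]['max'] if points else 0  # same fallback as above
print(f'Resuming event collection from block {last_known_block}')
client.close()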
Example #41
    def __init__(self,
                 influx_host: str,
                 influx_port: int,
                 crawler_http_port: int = DEFAULT_CRAWLER_HTTP_PORT,
                 registry: BaseContractRegistry = None,
                 node_storage_filepath: str = CrawlerNodeStorage.DEFAULT_DB_FILEPATH,
                 refresh_rate=DEFAULT_REFRESH_RATE,
                 restart_on_error=True,
                 *args,
                 **kwargs):

        # Settings
        self.federated_only = False  # Nope - for compatibility with Learner TODO # nucypher/466
        Teacher.set_federated_mode(False)

        self.registry = registry or InMemoryContractRegistry.from_latest_publication()
        self.economics = EconomicsFactory.get_economics(registry=self.registry)
        self._refresh_rate = refresh_rate
        self._restart_on_error = restart_on_error

        # TODO: Needs cleanup
        # Tracking
        node_storage = CrawlerNodeStorage(
            storage_filepath=node_storage_filepath)

        class MonitoringTracker(FleetSensor):
            def record_fleet_state(self, *args, **kwargs):
                new_state_or_none = super().record_fleet_state(*args, **kwargs)
                if new_state_or_none:
                    _, new_state = new_state_or_none
                    state = self.abridged_state_details(new_state)
                    node_storage.store_state_metadata(state)

        self.tracker_class = MonitoringTracker

        super().__init__(save_metadata=True,
                         node_storage=node_storage,
                         verify_node_bonding=False,
                         *args,
                         **kwargs)

        self.log = Logger(self.__class__.__name__)
        self.log.info(
            f"Storing node metadata in DB: {node_storage.db_filepath}")
        self.log.info(
            f"Storing blockchain metadata in DB: {influx_host}:{influx_port}")

        # In-memory Metrics
        self._stats = {'status': 'initializing'}
        self._crawler_client = None

        # Initialize InfluxDB
        self._db_host = influx_host
        self._db_port = influx_port
        self._influx_client = None

        # Agency
        self.staking_agent = ContractAgency.get_agent(StakingEscrowAgent,
                                                      registry=self.registry)

        # Crawler Tasks
        self.__collection_round = 0
        self.__collecting_nodes = False  # thread tracking
        self.__collecting_stats = False
        self.__events_from_block = 0  # from the beginning
        self.__collecting_events = False

        self._node_details_task = DelayedLoopingCall(
            f=self._learn_about_nodes,
            start_delay=random.randint(2, 15))  # random staggered start
        self._stats_collection_task = DelayedLoopingCall(
            f=self._collect_stats,
            threaded=True,
            start_delay=random.randint(2, 15))  # random staggered start
        self._events_collection_task = DelayedLoopingCall(
            f=self._collect_events,
            start_delay=random.randint(2, 15))  # random staggered start

        # JSON Endpoint
        self._crawler_http_port = crawler_http_port
        self._flask = None
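
The three DelayedLoopingCall tasks above stagger their first runs so the
periodic jobs don't all fire at once. A rough sketch of the same idea using
the stock Twisted LoopingCall (DelayedLoopingCall itself is project-specific):

import random

from twisted.internet import reactor
from twisted.internet.task import LoopingCall

def collect():
    print('collecting...')

task = LoopingCall(collect)
# Delay the first run by a random 2-15s offset, then repeat every 60 seconds.
reactor.callLater(random.randint(2, 15), task.start, 60, True)
reactor.run()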
Example #42
from twisted.web import resource
from twisted.web.client import FileBodyProducer
from twisted.web.server import NOT_DONE_YET
from twisted.internet import utils, defer

from zope.interface import implementer

from leap.common.files import mkdir_p
from leap.soledad.server import interfaces
from leap.soledad.common.blobs import ACCEPTED_FLAGS


__all__ = ['BlobsResource']


logger = Logger()

# Used for sanitizers; we accept only letters, numbers, '-' and '_'
VALID_STRINGS = re.compile('^[a-zA-Z0-9_-]+$')


# for the future:
# [ ] isolate user avatar in a safer way
# [ ] catch timeout in the server (and delete incomplete upload)
# [ ] chunking (should we do it on the client or on the server?)


@implementer(interfaces.IBlobsBackend)
@implementer(interfaces.IIncomingBoxBackend)
class FilesystemBlobsBackend(object):
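
A small illustration of the VALID_STRINGS sanitizer above: only letters,
digits, '-' and '_' pass, which keeps traversal attempts like '../..' out of
blob paths:

import re

VALID_STRINGS = re.compile('^[a-zA-Z0-9_-]+$')

for candidate in ('user_1', 'blob-42', '../../etc/passwd'):
    verdict = 'accepted' if VALID_STRINGS.match(candidate) else 'rejected'
    print(candidate, '->', verdict)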
Example #43
from enum import Enum
import math
import threading

import numpy
import pkg_resources
import pyogg
import sounddevice as sd
from singtcommon import RingBuffer
from twisted.internet import defer
from twisted.internet.task import LoopingCall

# Start a logger with a namespace for a particular subsystem of our application.
from twisted.logger import Logger
log = Logger("recording_mode")


class RecordingMode:
    class State(Enum):
        INTRO = 10
        RECORD = 20

    def __init__(self, file_like, backing_audio_ids, recording_audio_id,
                 context):
        self._file_like = file_like
        self._backing_audio_ids = backing_audio_ids
        self._recording_audio_id = recording_audio_id
        self._context = context

        self._session_files = context["session_files"]
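
A hedged sketch of the namespaced-logger pattern used above: the string passed
to Logger() becomes the namespace attached to every event, so observers can
filter per subsystem. The observer setup here is an assumption, not part of
the original module:

import sys

from twisted.logger import Logger, globalLogBeginner, textFileLogObserver

globalLogBeginner.beginLoggingTo([textFileLogObserver(sys.stdout)])
log = Logger("recording_mode")
log.info("entering state {state}", state="RECORD")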
Example #44
    def __init__(self, bus, topic, transform, logger):
        self.bus = bus
        self.topic = topic
        self.transform = to_list(transform)
        self.log = logger or Logger()
Example #45
class BaseHandler(cyclone.web.RequestHandler):
    """Base cyclone RequestHandler for autopush"""

    log = Logger()

    def initialize(self):
        """Initialize info from the client"""
        self._client_info = self._init_info()

    @property
    def ap_settings(self):
        # type: () -> AutopushSettings
        return self.application.ap_settings

    @property
    def db(self):
        # type: () -> DatabaseManager
        return self.application.db

    @property
    def metrics(self):
        # type: () -> IMetrics
        return self.db.metrics

    def _init_info(self):
        return dict(
            ami_id=self.ap_settings.ami_id,
            request_id=str(uuid.uuid4()),
            user_agent=self.request.headers.get('user-agent', ""),
            remote_ip=self.request.headers.get('x-forwarded-for',
                                               self.request.remote_ip),
            authorization=self.request.headers.get('authorization', ""),
            message_ttl=self.request.headers.get('ttl', None),
            uri=self.request.uri,
            python_version=sys.version,
        )

    def write_error(self, code, **kwargs):
        """Write the error (otherwise unhandled exception when dealing with
        unknown method specifications.)

        This is a Cyclone API Override method used by endpoint and
        websocket.

        """
        self.set_status(code)
        if 'exc_info' in kwargs:
            self.log.failure(format=kwargs.get('format', "Exception"),
                             failure=failure.Failure(*kwargs['exc_info']),
                             client_info=self._client_info)
        else:
            self.log.failure("Error in handler: %s" % code,
                             client_info=self._client_info)
        self.finish()

    def authenticate_peer_cert(self):
        """Authenticate the client per the configured client_certs.

        Aborts the request w/ a 401 on failure.

        """
        cert = self.request.connection.transport.getPeerCertificate()
        if cert:
            cert_signature = cert.digest('sha256')
            cn = cert.get_subject().CN
            auth = self.ap_settings.client_certs.get(cert_signature)
            if auth is not None:
                # TLS authenticated
                self._client_info.update(tls_auth=auth,
                                         tls_auth_sha256=cert_signature,
                                         tls_auth_cn=cn)
                return

            self._client_info.update(tls_failed_sha256=cert_signature,
                                     tls_failed_cn=cn)

        self.log.warn("Failed TLS auth", client_info=self._client_info)
        self.set_status(401)
        # "Transport mode" isn't standard, inspired by:
        # http://www6.ietf.org/mail-archive/web/tls/current/msg05589.html
        self.set_header('WWW-Authenticate',
                        'Transport mode="tls-client-certificate"')
        self.finish()
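
The client_certs table consulted by authenticate_peer_cert() maps SHA-256
certificate digests to auth labels. A sketch of producing such a digest with
pyOpenSSL from a PEM file (the file path is an assumption):

from OpenSSL import crypto

with open('client.pem', 'rb') as pem:
    cert = crypto.load_certificate(crypto.FILETYPE_PEM, pem.read())

print(cert.digest('sha256'))   # the lookup key, e.g. b'AB:CD:...'
print(cert.get_subject().CN)   # the CN recorded in _client_info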
Example #46
File: server.py Project: yuu6/twisted
class Request(Copyable, http.Request, components.Componentized):
    """
    An HTTP request.

    @ivar defaultContentType: A L{bytes} giving the default I{Content-Type}
        value to send in responses if no other value is set.  L{None} disables
        the default.

    @ivar _insecureSession: The L{Session} object representing state that will
        be transmitted over plain-text HTTP.

    @ivar _secureSession: The L{Session} object representing the state that
        will be transmitted only over HTTPS.
    """

    defaultContentType = b"text/html"

    site = None
    appRootURL = None
    prepath = None  # type: Optional[List[bytes]]
    postpath = None  # type: Optional[List[bytes]]
    __pychecker__ = 'unusednames=issuer'
    _inFakeHead = False
    _encoder = None
    _log = Logger()

    def __init__(self, *args, **kw):
        http.Request.__init__(self, *args, **kw)
        components.Componentized.__init__(self)

    def getStateToCopyFor(self, issuer):
        x = self.__dict__.copy()
        del x['transport']
        # XXX refactor this attribute out; it's from protocol
        # del x['server']
        del x['channel']
        del x['content']
        del x['site']
        self.content.seek(0, 0)
        x['content_data'] = self.content.read()
        x['remote'] = ViewPoint(issuer, self)

        # Address objects aren't jellyable
        x['host'] = _addressToTuple(x['host'])
        x['client'] = _addressToTuple(x['client'])

        # Header objects also aren't jellyable.
        x['requestHeaders'] = list(x['requestHeaders'].getAllRawHeaders())

        return x

    # HTML generation helpers

    def sibLink(self, name):
        """
        Return the text that links to a sibling of the requested resource.

        @param name: The sibling resource
        @type name: C{bytes}

        @return: A relative URL.
        @rtype: C{bytes}
        """
        if self.postpath:
            return (len(self.postpath) * b"../") + name
        else:
            return name

    def childLink(self, name):
        """
        Return the text that links to a child of the requested resource.

        @param name: The child resource
        @type name: C{bytes}

        @return: A relative URL.
        @rtype: C{bytes}
        """
        lpp = len(self.postpath)
        if lpp > 1:
            return ((lpp - 1) * b"../") + name
        elif lpp == 1:
            return name
        else:  # lpp == 0
            if len(self.prepath) and self.prepath[-1]:
                return self.prepath[-1] + b'/' + name
            else:
                return name
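    # Worked example for the two helpers above (values are illustrative): for
    # a request routed so that postpath == [b'bar', b'baz'],
    #   sibLink(b'qux')   -> b'../../qux'  (len(postpath) == 2)
    #   childLink(b'qux') -> b'../qux'     (len(postpath) - 1 == 1)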

    def gotLength(self, length):
        """
        Called when HTTP channel got length of content in this request.

        This method is not intended for users.

        @param length: The length of the request body, as indicated by the
            request headers.  L{None} if the request headers do not indicate a
            length.
        """
        try:
            getContentFile = self.channel.site.getContentFile
        except AttributeError:
            http.Request.gotLength(self, length)
        else:
            self.content = getContentFile(length)

    def process(self):
        """
        Process a request.

        Find the addressed resource in this request's L{Site},
        and call L{self.render()<Request.render()>} with it.

        @see: L{Site.getResourceFor()}
        """

        # get site from channel
        self.site = self.channel.site

        # set various default headers
        self.setHeader(b'server', version)
        self.setHeader(b'date', http.datetimeToString())

        # Resource Identification
        self.prepath = []
        self.postpath = list(map(unquote, self.path[1:].split(b'/')))

        # Short-circuit for requests whose path is '*'.
        if self.path == b'*':
            self._handleStar()
            return

        try:
            resrc = self.site.getResourceFor(self)
            if resource._IEncodingResource.providedBy(resrc):
                encoder = resrc.getEncoder(self)
                if encoder is not None:
                    self._encoder = encoder
            self.render(resrc)
        except:
            self.processingFailed(failure.Failure())

    def write(self, data):
        """
        Write data to the transport (if not responding to a HEAD request).

        @param data: A string to write to the response.
        @type data: L{bytes}
        """
        if not self.startedWriting:
            # Before doing the first write, check to see if a default
            # Content-Type header should be supplied. We omit it on
            # NOT_MODIFIED and NO_CONTENT responses. We also omit it if there
            # is a Content-Length header set to 0, as empty bodies don't need
            # a content-type.
            needsCT = self.code not in (http.NOT_MODIFIED, http.NO_CONTENT)
            contentType = self.responseHeaders.getRawHeaders(b'content-type')
            contentLength = self.responseHeaders.getRawHeaders(
                b'content-length')
            contentLengthZero = contentLength and (contentLength[0] == b'0')

            if (needsCT and contentType is None
                    and self.defaultContentType is not None
                    and not contentLengthZero):
                self.responseHeaders.setRawHeaders(b'content-type',
                                                   [self.defaultContentType])

        # Only let the write happen if we're not generating a HEAD response by
        # faking out the request method.  Note, if we are doing that,
        # startedWriting will never be true, and the above logic may run
        # multiple times.  It will only actually change the responseHeaders
        # once though, so it's still okay.
        if not self._inFakeHead:
            if self._encoder:
                data = self._encoder.encode(data)
            http.Request.write(self, data)

    def finish(self):
        """
        Override C{http.Request.finish} for possible encoding.
        """
        if self._encoder:
            data = self._encoder.finish()
            if data:
                http.Request.write(self, data)
        return http.Request.finish(self)

    def render(self, resrc):
        """
        Ask a resource to render itself.

        If the resource does not support the requested method,
        generate a C{NOT IMPLEMENTED} or C{NOT ALLOWED} response.

        @param resrc: The resource to render.
        @type resrc: L{twisted.web.resource.IResource}

        @see: L{IResource.render()<twisted.web.resource.IResource.render()>}
        """
        try:
            body = resrc.render(self)
        except UnsupportedMethod as e:
            allowedMethods = e.allowedMethods
            if (self.method == b"HEAD") and (b"GET" in allowedMethods):
                # We must support HEAD (RFC 2616, 5.1.1).  If the
                # resource doesn't, fake it by giving the resource
                # a 'GET' request and then return only the headers,
                # not the body.
                self._log.info("Using GET to fake a HEAD request for {resrc}",
                               resrc=resrc)
                self.method = b"GET"
                self._inFakeHead = True
                body = resrc.render(self)

                if body is NOT_DONE_YET:
                    self._log.info(
                        "Tried to fake a HEAD request for {resrc}, but "
                        "it got away from me.",
                        resrc=resrc)
                    # Oh well, I guess we won't include the content length.
                else:
                    self.setHeader(b'content-length', intToBytes(len(body)))

                self._inFakeHead = False
                self.method = b"HEAD"
                self.write(b'')
                self.finish()
                return

            if self.method in (supportedMethods):
                # We MUST include an Allow header
                # (RFC 2616, 10.4.6 and 14.7)
                self.setHeader(b'Allow', b', '.join(allowedMethods))
                s = ('''Your browser approached me (at %(URI)s) with'''
                     ''' the method "%(method)s".  I only allow'''
                     ''' the method%(plural)s %(allowed)s here.''' % {
                         'URI': escape(nativeString(self.uri)),
                         'method': nativeString(self.method),
                         'plural': ((len(allowedMethods) > 1) and 's') or '',
                         'allowed': ', '.join(
                             [nativeString(x) for x in allowedMethods])
                     })
                epage = resource.ErrorPage(http.NOT_ALLOWED,
                                           "Method Not Allowed", s)
                body = epage.render(self)
            else:
                epage = resource.ErrorPage(
                    http.NOT_IMPLEMENTED, "Huh?",
                    "I don't know how to treat a %s request." %
                    (escape(self.method.decode("charmap")), ))
                body = epage.render(self)
        # end except UnsupportedMethod

        if body is NOT_DONE_YET:
            return
        if not isinstance(body, bytes):
            body = resource.ErrorPage(
                http.INTERNAL_SERVER_ERROR, "Request did not return bytes",
                "Request: " + util._PRE(reflect.safe_repr(self)) + "<br />" +
                "Resource: " + util._PRE(reflect.safe_repr(resrc)) + "<br />" +
                "Value: " + util._PRE(reflect.safe_repr(body))).render(self)

        if self.method == b"HEAD":
            if len(body) > 0:
                # This is a Bad Thing (RFC 2616, 9.4)
                self._log.info(
                    "Warning: HEAD request {slf} for resource {resrc} is"
                    " returning a message body. I think I'll eat it.",
                    slf=self,
                    resrc=resrc)
                self.setHeader(b'content-length', intToBytes(len(body)))
            self.write(b'')
        else:
            self.setHeader(b'content-length', intToBytes(len(body)))
            self.write(body)
        self.finish()

    def processingFailed(self, reason):
        """
        Finish this request with an indication that processing failed and
        possibly display a traceback.

        @param reason: Reason this request has failed.
        @type reason: L{twisted.python.failure.Failure}

        @return: The reason passed to this method.
        @rtype: L{twisted.python.failure.Failure}
        """
        self._log.failure('', failure=reason)
        if self.site.displayTracebacks:
            body = (b"<html><head><title>web.Server Traceback"
                    b" (most recent call last)</title></head>"
                    b"<body><b>web.Server Traceback"
                    b" (most recent call last):</b>\n\n" +
                    util.formatFailure(reason) + b"\n\n</body></html>\n")
        else:
            body = (b"<html><head><title>Processing Failed"
                    b"</title></head><body>"
                    b"<b>Processing Failed</b></body></html>")

        self.setResponseCode(http.INTERNAL_SERVER_ERROR)
        self.setHeader(b'content-type', b"text/html")
        self.setHeader(b'content-length', intToBytes(len(body)))
        self.write(body)
        self.finish()
        return reason

    def view_write(self, issuer, data):
        """Remote version of write; same interface.
        """
        self.write(data)

    def view_finish(self, issuer):
        """Remote version of finish; same interface.
        """
        self.finish()

    def view_addCookie(self, issuer, k, v, **kwargs):
        """Remote version of addCookie; same interface.
        """
        self.addCookie(k, v, **kwargs)

    def view_setHeader(self, issuer, k, v):
        """Remote version of setHeader; same interface.
        """
        self.setHeader(k, v)

    def view_setLastModified(self, issuer, when):
        """Remote version of setLastModified; same interface.
        """
        self.setLastModified(when)

    def view_setETag(self, issuer, tag):
        """Remote version of setETag; same interface.
        """
        self.setETag(tag)

    def view_setResponseCode(self, issuer, code, message=None):
        """
        Remote version of setResponseCode; same interface.
        """
        self.setResponseCode(code, message)

    def view_registerProducer(self, issuer, producer, streaming):
        """Remote version of registerProducer; same interface.
        (requires a remote producer.)
        """
        self.registerProducer(_RemoteProducerWrapper(producer), streaming)

    def view_unregisterProducer(self, issuer):
        self.unregisterProducer()

    ### these calls remain local

    _secureSession = None
    _insecureSession = None

    @property
    def session(self):
        """
        If a session has already been created or looked up with
        L{Request.getSession}, this will return that object.  (This will always
        be the session that matches the security of the request; so if
        C{forceNotSecure} is used on a secure request, this will not return
        that session.)

        @return: the session attribute
        @rtype: L{Session} or L{None}
        """
        if self.isSecure():
            return self._secureSession
        else:
            return self._insecureSession

    def getSession(self, sessionInterface=None, forceNotSecure=False):
        """
        Check if there is a session cookie, and if not, create it.

        By default, the cookie will be secure for HTTPS requests and not secure
        for HTTP requests.  If for some reason you need access to the insecure
        cookie from a secure request you can set C{forceNotSecure = True}.

        @param forceNotSecure: Should we retrieve a session that will be
            transmitted over HTTP, even if this L{Request} was delivered over
            HTTPS?
        @type forceNotSecure: L{bool}
        """
        # Make sure we aren't creating a secure session on a non-secure page
        secure = self.isSecure() and not forceNotSecure

        if not secure:
            cookieString = b"TWISTED_SESSION"
            sessionAttribute = "_insecureSession"
        else:
            cookieString = b"TWISTED_SECURE_SESSION"
            sessionAttribute = "_secureSession"

        session = getattr(self, sessionAttribute)

        if session is not None:
            # We have a previously created session.
            try:
                # Refresh the session, to keep it alive.
                session.touch()
            except (AlreadyCalled, AlreadyCancelled):
                # Session has already expired.
                session = None

        if session is None:
            # No session was created yet for this request.
            cookiename = b"_".join([cookieString] + self.sitepath)
            sessionCookie = self.getCookie(cookiename)
            if sessionCookie:
                try:
                    session = self.site.getSession(sessionCookie)
                except KeyError:
                    pass
            # if it still hasn't been set, fix it up.
            if not session:
                session = self.site.makeSession()
                self.addCookie(cookiename,
                               session.uid,
                               path=b"/",
                               secure=secure)

        setattr(self, sessionAttribute, session)

        if sessionInterface:
            return session.getComponent(sessionInterface)

        return session

    def _prePathURL(self, prepath):
        port = self.getHost().port
        if self.isSecure():
            default = 443
        else:
            default = 80
        if port == default:
            hostport = ''
        else:
            hostport = ':%d' % port
        prefix = networkString(
            'http%s://%s%s/' %
            (self.isSecure() and 's'
             or '', nativeString(self.getRequestHostname()), hostport))
        path = b'/'.join([quote(segment, safe=b'') for segment in prepath])
        return prefix + path

    def prePathURL(self):
        return self._prePathURL(self.prepath)

    def URLPath(self):
        from twisted.python import urlpath
        return urlpath.URLPath.fromRequest(self)

    def rememberRootURL(self):
        """
        Remember the currently-processed part of the URL for later
        recalling.
        """
        url = self._prePathURL(self.prepath[:-1])
        self.appRootURL = url

    def getRootURL(self):
        """
        Get a previously-remembered URL.

        @return: An absolute URL.
        @rtype: L{bytes}
        """
        return self.appRootURL

    def _handleStar(self):
        """
        Handle receiving a request whose path is '*'.

        RFC 7231 defines an OPTIONS * request as being something that a client
        can send as a low-effort way to probe server capabilities or readiness.
        Rather than bother the user with this, we simply fast-path it back to
        an empty 200 OK. Any non-OPTIONS verb gets a 405 Method Not Allowed
        telling the client they can only use OPTIONS.
        """
        if self.method == b'OPTIONS':
            self.setResponseCode(http.OK)
        else:
            self.setResponseCode(http.NOT_ALLOWED)
            self.setHeader(b'Allow', b'OPTIONS')

        # RFC 7231 says we MUST set content-length 0 when responding to this
        # with no body.
        self.setHeader(b'Content-Length', b'0')
        self.finish()
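
A quick way to exercise the OPTIONS * fast path described in _handleStar,
using only the standard library against a locally running Site (host and port
are assumptions):

import http.client

conn = http.client.HTTPConnection('localhost', 8080)
conn.request('OPTIONS', '*')
response = conn.getresponse()
print(response.status, response.getheader('Content-Length'))  # expect: 200 0
conn.close()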
Example #47
    def __init__(self,
                 db_filepath: str,
                 rest_host: str,
                 rest_port: int,
                 client_password: str = None,
                 crash_on_error: bool = False,
                 economics: TokenEconomics = None,
                 distribute_ether: bool = True,
                 *args,
                 **kwargs):

        # Character
        super().__init__(*args, **kwargs)
        self.log = Logger(f"felix-{self.checksum_address[-6::]}")

        # Network
        self.rest_port = rest_port
        self.rest_host = rest_host
        self.rest_app = NOT_RUNNING
        self.crash_on_error = crash_on_error

        # Database
        self.db_filepath = db_filepath
        self.db = NO_DATABASE_AVAILABLE
        self.db_engine = create_engine(f'sqlite:///{self.db_filepath}',
                                       convert_unicode=True)

        # Blockchain
        transacting_power = TransactingPower(blockchain=self.blockchain,
                                             password=client_password,
                                             account=self.checksum_address)
        self._crypto_power.consume_power_up(transacting_power)

        self.token_agent = NucypherTokenAgent(blockchain=self.blockchain)
        self.reserved_addresses = [
            self.checksum_address, BlockchainInterface.NULL_ADDRESS
        ]

        # Update reserved addresses with deployed contracts
        existing_entries = list(self.blockchain.registry.enrolled_addresses)
        self.reserved_addresses.extend(existing_entries)

        # Distribution
        self.__distributed = 0  # Track NU Output
        self.__airdrop = 0  # Track Batch
        self.__disbursement = 0  # Track Quantity
        self._distribution_task = LoopingCall(f=self.airdrop_tokens)
        self._distribution_task.clock = self._CLOCK
        self.start_time = NOT_RUNNING

        if not economics:
            economics = TokenEconomics()
        self.economics = economics

        self.MAXIMUM_DISBURSEMENT = economics.maximum_allowed_locked
        self.INITIAL_DISBURSEMENT = economics.minimum_allowed_locked

        # Optionally send ether with each token transaction
        self.distribute_ether = distribute_ether

        # Banner
        self.log.info(FELIX_BANNER.format(self.checksum_address))
Example #48
class Miner(NucypherTokenActor):
    """
    Ursula baseclass for blockchain operations, practically carrying a pickaxe.
    """

    __current_period_sample_rate = 60*60  # seconds

    class MinerError(NucypherTokenActor.ActorError):
        pass

    def __init__(self, is_me: bool, start_staking_loop: bool = True, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.log = Logger("miner")
        self.is_me = is_me

        if is_me:
            self.token_agent = NucypherTokenAgent(blockchain=self.blockchain)

            # Staking Loop
            self.__current_period = None
            self._abort_on_staking_error = True
            self._staking_task = task.LoopingCall(self._confirm_period)

        else:
            self.token_agent = constants.STRANGER_MINER

        self.miner_agent = MinerAgent(blockchain=self.blockchain)

        self.__stakes = constants.NO_STAKES
        self.__start_time = constants.NO_STAKES
        self.__uptime_period = constants.NO_STAKES
        self.__terminal_period = constants.NO_STAKES

        self.__read_stakes()
        if self.stakes and start_staking_loop:
            self.stake()

    #
    # Staking
    #
    @only_me
    def stake(self, confirm_now: bool = True) -> None:
        """High-level staking looping call initialization"""
        # TODO #841: Check if there is an active stake in the current period: Resume staking daemon

        # Get the last stake end period of all stakes
        terminal_period = max(stake.end_period for stake in self.stakes.values())

        if confirm_now:
            self.confirm_activity()

        # record start time and periods
        self.__start_time = maya.now()
        self.__uptime_period = self.miner_agent.get_current_period()
        self.__terminal_period = self.__uptime_period + terminal_period
        self.__current_period = self.__uptime_period
        self.start_staking_loop()
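    # Worked example for stake() above, with illustrative numbers: if the
    # current period is 100 and the longest stake's end_period is 30, then
    # __terminal_period = 100 + 30 = 130, and the staking loop keeps
    # confirming activity until __current_period reaches that value.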

    @only_me
    def _confirm_period(self):

        period = self.miner_agent.get_current_period()
        self.log.info("Checking for new period. Current period is {}".format(self.__current_period))

        if self.__current_period != period:

            # check for stake expiration
            stake_expired = self.__current_period >= self.__terminal_period
            if stake_expired:
                self.log.info('Stake duration expired')
                return True

            self.confirm_activity()
            self.__current_period = period
            self.log.info("Confirmed activity for period {}".format(self.__current_period))

    @only_me
    def _crash_gracefully(self, failure=None):
        """
        A facility for crashing more gracefully in the event that an exception
        is unhandled in a different thread, especially inside a loop like the learning loop.
        """
        self._crashed = failure
        failure.raiseException()

    @only_me
    def handle_staking_errors(self, *args, **kwargs):
        failure = args[0]
        if self._abort_on_staking_error:
            self.log.critical("Unhandled error during node staking.  Attempting graceful crash.")
            reactor.callFromThread(self._crash_gracefully, failure=failure)
        else:
            self.log.warn("Unhandled error during node learning: {}".format(failure.getTraceback()))

    @only_me
    def start_staking_loop(self, now=True):
        if self._staking_task.running:
            return False
        else:
            d = self._staking_task.start(interval=self.__current_period_sample_rate, now=now)
            d.addErrback(self.handle_staking_errors)
            self.log.info(f"Starting Staking Loop NOW - running until period {self.__terminal_period}")
            return d

    @property
    def is_staking(self):
        """Checks if this Miner currently has locked tokens."""
        return bool(self.locked_tokens > 0)

    @property
    def locked_tokens(self):
        """Returns the amount of tokens this miner has locked."""
        return self.miner_agent.get_locked_tokens(miner_address=self.checksum_public_address)

    @property
    def total_staked(self) -> NU:
        if self.stakes:
            return NU(sum(int(stake.value) for stake in self.stakes.values()), 'NuNit')
        else:
            return NU(0, 'NuNit')

    def __read_stakes(self) -> None:
        stakes_reader = self.miner_agent.get_all_stakes(miner_address=self.checksum_public_address)
        stakes = dict()
        for index, stake_info in enumerate(stakes_reader):
            stake = Stake.from_stake_info(owner_address=self.checksum_public_address,
                                          stake_info=stake_info,
                                          index=index)
            stakes[index] = stake
        self.__stakes = stakes

    @property
    def stakes(self) -> Dict[int, Stake]:
        """Return all cached stakes from the blockchain."""
        return self.__stakes

    @only_me
    def deposit(self, amount: int, lock_periods: int) -> Tuple[str, str]:
        """Public facing method for token locking."""

        approve_txhash = self.token_agent.approve_transfer(amount=amount,
                                                           target_address=self.miner_agent.contract_address,
                                                           sender_address=self.checksum_public_address)

        deposit_txhash = self.miner_agent.deposit_tokens(amount=amount,
                                                         lock_periods=lock_periods,
                                                         sender_address=self.checksum_public_address)

        return approve_txhash, deposit_txhash

    @only_me
    def divide_stake(self,
                     stake_index: int,
                     target_value: NU,
                     additional_periods: int = None,
                     expiration: maya.MayaDT = None) -> dict:
        """
        Modifies the unlocking schedule and value of already locked tokens.

        This actor requires that is_me is True, and that the expiration datetime is after the existing
        locking schedule of this miner, or an exception will be raised.

        :param stake_index: The miner's stake index of the stake to divide
        :param additional_periods: The number of periods to extend the stake by
        :param target_value:  The quantity of tokens in the smallest denomination to divide.
        :param expiration: The new expiration date to set as an end period for stake division.
        :return: Returns the blockchain transaction hash

        """

        if additional_periods and expiration:
            raise ValueError("Pass the number of lock periods or an expiration MayaDT; not both.")

        stake = self.__stakes[stake_index]

        if expiration:
            additional_periods = datetime_to_period(datetime=expiration) - stake.end_period
            if additional_periods <= 0:
                raise self.MinerError("Expiration {} must be at least 1 period from now.".format(expiration))

        if target_value >= stake.value:
            raise self.MinerError(f"Cannot divide stake; Value ({target_value}) must be less "
                                  f"than the existing stake value {stake.value}.")

        # Ensure both halves are for valid amounts
        validate_stake_amount(amount=target_value)
        validate_stake_amount(amount=stake.value - target_value)

        tx = self.miner_agent.divide_stake(miner_address=self.checksum_public_address,
                                           stake_index=stake_index,
                                           target_value=int(target_value),
                                           periods=additional_periods)

        self.blockchain.wait_for_receipt(tx)
        self.__read_stakes()  # update local on-chain stake cache
        return tx
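    # Hypothetical call for divide_stake() above: carve 500 NU out of stake #0
    # and extend the new sub-stake 10 periods past the original end period:
    #
    #   miner.divide_stake(stake_index=0,
    #                      target_value=NU(500, 'NU'),
    #                      additional_periods=10)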

    @only_me
    def __validate_stake(self, amount: NU, lock_periods: int) -> bool:

        validate_stake_amount(amount=amount)
        validate_locktime(lock_periods=lock_periods)

        if not self.token_balance >= amount:
            raise self.MinerError("Insufficient miner token balance ({balance})".format(balance=self.token_balance))
        else:
            return True

    @only_me
    def initialize_stake(self,
                         amount: NU,
                         lock_periods: int = None,
                         expiration: maya.MayaDT = None,
                         entire_balance: bool = False) -> dict:
        """
        High level staking method for Miners.

        :param amount: Amount of tokens to stake denominated in the smallest unit.
        :param lock_periods: Duration of stake in periods.
        :param expiration: A MayaDT object representing the time the stake expires; used to calculate lock_periods.
        :param entire_balance: If True, stake the entire balance of this node, or the maximum possible.

        """

        if lock_periods and expiration:
            raise ValueError("Pass the number of lock periods or an expiration MayaDT; not both.")
        if entire_balance and amount:
            raise self.MinerError("Specify an amount or entire balance, not both")

        if expiration:
            lock_periods = calculate_period_duration(future_time=expiration)

        if entire_balance is True:
            amount = self.token_balance

        amount = NU(int(amount), 'NuNit')

        staking_transactions = OrderedDict()  # type: OrderedDict # Time series of txhashes

        # Validate
        assert self.__validate_stake(amount=amount, lock_periods=lock_periods)

        # Transact
        approve_txhash, initial_deposit_txhash = self.deposit(amount=int(amount), lock_periods=lock_periods)
        self._transaction_cache.append((datetime.utcnow(), initial_deposit_txhash))

        staking_transactions['approve'] = approve_txhash
        staking_transactions['deposit'] = initial_deposit_txhash
        self.__read_stakes()  # update local on-chain stake cache

        self.log.info("{} Initialized new stake: {} tokens for {} periods".format(self.checksum_public_address, amount, lock_periods))
        return staking_transactions

    #
    # Reward and Collection
    #

    @only_me
    def confirm_activity(self) -> str:
        """Miner rewarded for every confirmed period"""
        txhash = self.miner_agent.confirm_activity(node_address=self.checksum_public_address)
        self._transaction_cache.append((datetime.utcnow(), txhash))
        return txhash

    @only_me
    def mint(self) -> str:
        """Computes and transfers tokens to the miner's account"""
        mint_txhash = self.miner_agent.mint(node_address=self.checksum_public_address)
        self._transaction_cache.append((datetime.utcnow(), mint_txhash))
        return mint_txhash

    def calculate_reward(self) -> int:
        staking_reward = self.miner_agent.calculate_staking_reward(checksum_address=self.checksum_public_address)
        return staking_reward

    @only_me
    def collect_policy_reward(self, collector_address=None, policy_agent: PolicyAgent = None):
        """Collect rewarded ETH"""
        policy_agent = policy_agent if policy_agent is not None else PolicyAgent(blockchain=self.blockchain)

        withdraw_address = collector_address or self.checksum_public_address
        policy_reward_txhash = policy_agent.collect_policy_reward(collector_address=withdraw_address, miner_address=self.checksum_public_address)
        self._transaction_cache.append((datetime.utcnow(), policy_reward_txhash))
        return policy_reward_txhash

    @only_me
    def collect_staking_reward(self) -> str:
        """Withdraw tokens rewarded for staking."""
        collection_txhash = self.miner_agent.collect_staking_reward(checksum_address=self.checksum_public_address)
        self._transaction_cache.append((datetime.utcnow(), collection_txhash))
        return collection_txhash
Example #49
"""Parse sibling group pages on TARE."""

import random

from bs4 import BeautifulSoup
from twisted.logger import Logger

from data_types import Contact, SiblingGroup
from helpers import return_type
from only_child_parser import gather_profile_details_for as gather_child
from utils import create_attachment, get_pictures_encoded, parse_name
from validators import valid_email, valid_phone

log = Logger()

ALL_CHILDREN_SELECTOR = "div#pageContent > div > div.galleryImage"
CASE_WORKER_SELECTOR = "div#pageContent > div > div:nth-of-type(6)"

# These are the magic phrases that get the data from a TARE profile page
# They also happen to represent some, if not all of the fields to be updated
SGROUP_SELECTORS = {
    "Name": ("div > div:nth-of-type(2) > a"),
    # TARE Id
    "Case_Number__c":
    ("div#pageContent > div > div:nth-of-type(6) > div:nth-of-type(2)"),
    "Children_s_Bio__c": [
        "div#pageContent > div > div:nth-of-type(8)",
        "div#pageContent > div > div:nth-of-type(9)",
        "div#pageContent > div > div:nth-of-type(10)",
Example #50
class AsyncioSelectorReactor(PosixReactorBase):
    """
    Reactor running on top of L{asyncio.SelectorEventLoop}.
    """
    _asyncClosed = False
    _log = Logger()

    def __init__(self, eventloop=None):

        if eventloop is None:
            eventloop = get_event_loop()

        self._asyncioEventloop = eventloop
        self._writers = {}
        self._readers = {}
        self._delayedCalls = set()
        self._continuousPolling = _ContinuousPolling(self)
        super().__init__()

    def _unregisterFDInAsyncio(self, fd):
        """
        Compensate for a bug in asyncio where it will not unregister a FD that
        it cannot handle in the epoll loop. It touches internal asyncio code.

        A description of the bug by markrwilliams:

        The C{add_writer} method of asyncio event loops isn't atomic because
        all the Selector classes in the selector module internally record a
        file object before passing it to the platform's selector
        implementation. If the platform's selector decides the file object
        isn't acceptable, the resulting exception doesn't cause the Selector to
        un-track the file object.

        The failing/hanging stdio test goes through the following sequence of
        events (roughly):

        * The first C{connection.write(intToByte(value))} call hits the asyncio
        reactor's C{addWriter} method.

        * C{addWriter} calls the asyncio loop's C{add_writer} method, which
        happens to live on C{_BaseSelectorEventLoop}.

        * The asyncio loop's C{add_writer} method checks if the file object has
        been registered before via the selector's C{get_key} method.

        * It hasn't, so the KeyError block runs and calls the selector's
        register method

        * Code examples that follow use EpollSelector, but the code flow holds
        true for any other selector implementation. The selector's register
        method first calls through to the next register method in the MRO

        * That next method is always C{_BaseSelectorImpl.register} which
        creates a C{SelectorKey} instance for the file object, stores it under
        the file object's file descriptor, and then returns it.

        * Control returns to the concrete selector implementation, which asks
        the operating system to track the file descriptor using the right API.

        * The operating system refuses! An exception is raised that, in this
        case, the asyncio reactor handles by creating a C{_ContinuousPolling}
        object to watch the file descriptor.

        * The second C{connection.write(intToByte(value))} call hits the
        asyncio reactor's C{addWriter} method, which hits the C{add_writer}
        method. But the loop's selector's get_key method now returns a
        C{SelectorKey}! Now the asyncio reactor's C{addWriter} method thinks
        the asyncio loop will watch the file descriptor, even though it won't.
        """
        try:
            self._asyncioEventloop._selector.unregister(fd)
        except:
            pass

    def _readOrWrite(self, selectable, read):
        method = selectable.doRead if read else selectable.doWrite

        if selectable.fileno() == -1:
            self._disconnectSelectable(selectable, _NO_FILEDESC, read)
            return

        try:
            why = method()
        except Exception as e:
            why = e
            self._log.failure(None)
        if why:
            self._disconnectSelectable(selectable, why, read)

    def addReader(self, reader):
        if reader in self._readers.keys() or \
           reader in self._continuousPolling._readers:
            return

        fd = reader.fileno()
        try:
            self._asyncioEventloop.add_reader(fd, callWithLogger, reader,
                                              self._readOrWrite, reader, True)
            self._readers[reader] = fd
        except IOError as e:
            self._unregisterFDInAsyncio(fd)
            if e.errno == errno.EPERM:
                # epoll(7) doesn't support certain file descriptors,
                # e.g. filesystem files, so for those we just poll
                # continuously:
                self._continuousPolling.addReader(reader)
            else:
                raise

    def addWriter(self, writer):
        if writer in self._writers.keys() or \
           writer in self._continuousPolling._writers:
            return

        fd = writer.fileno()
        try:
            self._asyncioEventloop.add_writer(fd, callWithLogger, writer,
                                              self._readOrWrite, writer, False)
            self._writers[writer] = fd
        except PermissionError:
            self._unregisterFDInAsyncio(fd)
            # epoll(7) doesn't support certain file descriptors,
            # e.g. filesystem files, so for those we just poll
            # continuously:
            self._continuousPolling.addWriter(writer)
        except BrokenPipeError:
            # The kqueuereactor will raise this if there is a broken pipe
            self._unregisterFDInAsyncio(fd)
        except:
            self._unregisterFDInAsyncio(fd)
            raise

    def removeReader(self, reader):

        # First, see if they're trying to remove a reader that we don't have.
        if not (reader in self._readers.keys() \
                or self._continuousPolling.isReading(reader)):
            # We don't have it, so just return OK.
            return

        # If it was a cont. polling reader, check there first.
        if self._continuousPolling.isReading(reader):
            self._continuousPolling.removeReader(reader)
            return

        fd = reader.fileno()
        if fd == -1:
            # If the FD is -1, we want to know what its original FD was, to
            # remove it.
            fd = self._readers.pop(reader)
        else:
            self._readers.pop(reader)

        self._asyncioEventloop.remove_reader(fd)

    def removeWriter(self, writer):

        # First, see if they're trying to remove a writer that we don't have.
        if not (writer in self._writers.keys() \
                or self._continuousPolling.isWriting(writer)):
            # We don't have it, so just return OK.
            return

        # If it was a cont. polling writer, check there first.
        if self._continuousPolling.isWriting(writer):
            self._continuousPolling.removeWriter(writer)
            return

        fd = writer.fileno()

        if fd == -1:
            # If the FD is -1, we want to know what its original FD was, to
            # remove it.
            fd = self._writers.pop(writer)
        else:
            self._writers.pop(writer)

        self._asyncioEventloop.remove_writer(fd)

    def removeAll(self):
        return (self._removeAll(self._readers.keys(), self._writers.keys()) +
                self._continuousPolling.removeAll())

    def getReaders(self):
        return (list(self._readers.keys()) +
                self._continuousPolling.getReaders())

    def getWriters(self):
        return (list(self._writers.keys()) +
                self._continuousPolling.getWriters())

    def getDelayedCalls(self):
        return list(self._delayedCalls)

    def iterate(self, timeout):
        self._asyncioEventloop.call_later(timeout + 0.01,
                                          self._asyncioEventloop.stop)
        self._asyncioEventloop.run_forever()

    def run(self, installSignalHandlers=True):
        self.startRunning(installSignalHandlers=installSignalHandlers)
        self._asyncioEventloop.run_forever()
        if self._justStopped:
            self._justStopped = False

    def stop(self):
        super().stop()
        self.callLater(0, self.fireSystemEvent, "shutdown")

    def crash(self):
        super().crash()
        self._asyncioEventloop.stop()

    def seconds(self):
        return self._asyncioEventloop.time()

    def callLater(self, seconds, f, *args, **kwargs):
        def run():
            dc.called = True
            self._delayedCalls.remove(dc)
            f(*args, **kwargs)

        handle = self._asyncioEventloop.call_later(seconds, run)
        dchandle = _DCHandle(handle)

        def cancel(dc):
            self._delayedCalls.remove(dc)
            dchandle.cancel()

        def reset(dc):
            dchandle.handle = self._asyncioEventloop.call_at(dc.time, run)

        dc = DelayedCall(self.seconds() + seconds,
                         run, (), {},
                         cancel,
                         reset,
                         seconds=self.seconds)
        self._delayedCalls.add(dc)
        return dc

    def callFromThread(self, f, *args, **kwargs):
        g = lambda: self.callLater(0, f, *args, **kwargs)
        self._asyncioEventloop.call_soon_threadsafe(g)
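
Installing this reactor is normally done through the install() helper in the
same module; a minimal sketch (it must run before anything imports
twisted.internet.reactor):

from twisted.internet import asyncioreactor

asyncioreactor.install()

from twisted.internet import reactor  # now an AsyncioSelectorReactor
print(type(reactor))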
Example #51
class ClientService(service.Service, object):
    """
    A L{ClientService} maintains a single outgoing connection to a client
    endpoint, reconnecting after a configurable timeout when a connection
    fails, either before or after connecting.

    @since: 16.1.0
    """

    _log = Logger()

    def __init__(self, endpoint, factory, retryPolicy=None, clock=None):
        """
        @param endpoint: A L{stream client endpoint
            <interfaces.IStreamClientEndpoint>} provider which will be used to
            connect when the service starts.

        @param factory: A L{protocol factory <interfaces.IProtocolFactory>}
            which will be used to create clients for the endpoint.

        @param retryPolicy: A policy configuring how long L{ClientService} will
            wait between attempts to connect to C{endpoint}.
        @type retryPolicy: callable taking (the number of failed connection
            attempts made in a row (L{int})) and returning the number of
            seconds to wait before making another attempt.

        @param clock: The clock used to schedule reconnection.  It's mainly
            useful to be parametrized in tests.  If the factory is serialized,
            this attribute will not be serialized, and the default value (the
            reactor) will be restored when deserialized.
        @type clock: L{IReactorTime}
        """
        clock = _maybeGlobalReactor(clock)
        retryPolicy = _defaultPolicy if retryPolicy is None else retryPolicy

        self._endpoint = endpoint
        self._failedAttempts = 0
        self._stopped = False
        self._factory = factory
        self._timeoutForAttempt = retryPolicy
        self._clock = clock
        self._stopRetry = _noop
        self._lostDeferred = succeed(None)
        self._connectionInProgress = succeed(None)
        self._loseConnection = _noop

        self._currentConnection = None
        self._awaitingConnected = []


    def whenConnected(self):
        """
        Retrieve the currently-connected L{Protocol}, or the next one to
        connect.

        @return: a Deferred that fires with a protocol produced by the factory
            passed to C{__init__}
        @rtype: L{Deferred} firing with L{IProtocol} or failing with
            L{CancelledError} the service is stopped.
        """
        if self._currentConnection is not None:
            return succeed(self._currentConnection)
        elif self._stopped:
            return fail(CancelledError())
        else:
            result = Deferred()
            self._awaitingConnected.append(result)
            return result


    def _unawait(self, value):
        """
        Fire all outstanding L{ClientService.whenConnected} L{Deferred}s.

        @param value: the value to fire the L{Deferred}s with.
        """
        self._awaitingConnected, waiting = [], self._awaitingConnected
        for w in waiting:
            w.callback(value)


    def startService(self):
        """
        Start this L{ClientService}, initiating the connection retry loop.
        """
        if self.running:
            self._log.warn("Duplicate ClientService.startService {log_source}")
            return
        super(ClientService, self).startService()
        self._failedAttempts = 0

        def connectNow():
            thisLostDeferred = Deferred()

            def clientConnect(protocol):
                self._failedAttempts = 0
                self._loseConnection = protocol.transport.loseConnection
                self._lostDeferred = thisLostDeferred
                self._currentConnection = protocol._protocol
                self._unawait(self._currentConnection)

            def clientDisconnect(reason):
                self._currentConnection = None
                self._loseConnection = _noop
                thisLostDeferred.callback(None)
                retry(reason)

            factoryProxy = _DisconnectFactory(self._factory, clientDisconnect)

            self._stopRetry = _noop
            self._connectionInProgress = (self._endpoint.connect(factoryProxy)
                                          .addCallback(clientConnect)
                                          .addErrback(retry))

        def retry(failure):
            if not self.running:
                return
            self._failedAttempts += 1
            delay = self._timeoutForAttempt(self._failedAttempts)
            self._log.info("Scheduling retry {attempt} to connect {endpoint} "
                           "in {delay} seconds.", attempt=self._failedAttempts,
                           endpoint=self._endpoint, delay=delay)
            self._stopRetry = self._clock.callLater(delay, connectNow).cancel

        connectNow()


    def stopService(self):
        """
        Stop attempting to reconnect and close any existing connections.

        @return: a L{Deferred} that fires when all outstanding connections are
            closed and all in-progress connection attempts halted.
        """
        super(ClientService, self).stopService()
        self._stopRetry()
        self._stopRetry = _noop
        self._connectionInProgress.cancel()
        self._loseConnection()
        self._currentConnection = None
        def finishStopping(result):
            if not self.running:
                self._stopped = True
                self._unawait(Failure(CancelledError()))
            return None
        return (gatherResults([self._connectionInProgress, self._lostDeferred])
                .addBoth(finishStopping))
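
A minimal usage sketch for ClientService; the endpoint address is an
assumption and backoffPolicy is the stock retry policy from the same module:

from twisted.application.internet import ClientService, backoffPolicy
from twisted.internet import reactor
from twisted.internet.endpoints import TCP4ClientEndpoint
from twisted.internet.protocol import Factory, Protocol

endpoint = TCP4ClientEndpoint(reactor, 'example.com', 4321)
service = ClientService(endpoint, Factory.forProtocol(Protocol),
                        retryPolicy=backoffPolicy())
service.startService()
service.whenConnected().addCallback(lambda proto: print('connected:', proto))
reactor.run()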
Example #52
class PaymentController(object):
    """
    The ``PaymentController`` coordinates the process of turning a voucher
    into a collection of ZKAPs:

      1. A voucher to be consumed is handed to the controller.  Once a voucher
         is handed over to the controller the controller takes all
         responsibility for it.

      2. The controller tells the data store to remember the voucher.  The
         data store provides durability for the voucher which represents an
         investment (ie, a purchase) on the part of the client.

      3. The controller hands the voucher and some random tokens to a redeemer.
         In the future, this step will need to be retried in the case of failures.

      4. When the voucher has been redeemed for unblinded tokens (inputs to
         pass construction), the controller hands them to the data store with
         the voucher.  The data store marks the voucher as redeemed and stores
         the unblinded tokens for use by the storage client.

    :ivar int default_token_count: The number of tokens to request when
        redeeming a voucher, if no other count is given when the redemption is
        started.

    :ivar dict[unicode, Redeeming] _active: A mapping from voucher identifiers
        which currently have redemption attempts in progress to a
        ``Redeeming`` state representing the attempt.

    :ivar dict[unicode, datetime] _error: A mapping from voucher identifiers
        which have recently failed with an unrecognized, transient error.

    :ivar dict[unicode, datetime] _unpaid: A mapping from voucher identifiers
        which have recently failed a redemption attempt due to an unpaid
        response from the redemption server to timestamps when the failure was
        observed.

    :ivar int num_redemption_groups: The number of groups into which to divide
        tokens during the redemption process, with each group being redeemed
        separately from the rest.  This value needs to agree with the value
        the PaymentServer is configured with.

        TODO: Retrieve this value from the PaymentServer or from the
        ZKAPAuthorizer configuration instead of just hard-coding a duplicate
        value in this implementation.

    :ivar IReactorTime _clock: The reactor to use for scheduling redemption
        retries.
    """
    _log = Logger()

    store = attr.ib()
    redeemer = attr.ib()
    default_token_count = attr.ib()

    num_redemption_groups = attr.ib(default=16)

    _clock = attr.ib(default=None)

    _error = attr.ib(default=attr.Factory(dict))
    _unpaid = attr.ib(default=attr.Factory(dict))
    _active = attr.ib(default=attr.Factory(dict))

    def __attrs_post_init__(self):
        """
        Check the voucher store for any vouchers in need of redemption.

        This is an initialization-time hook called by attrs.
        """
        if self._clock is None:
            self._clock = namedAny("twisted.internet.reactor")

        self._check_pending_vouchers()
        # Also start a time-based polling loop to retry redemption of vouchers
        # in retryable error states.
        self._schedule_retries()

    def _schedule_retries(self):
        # TODO: should not eagerly schedule calls.  If there are no vouchers
        # in an error state we shouldn't wake up at all.
        #
        # TODO: should schedule retries on a bounded exponential backoff
        # instead, perhaps on a per-voucher basis.
        self._retry_task = LoopingCall(self._retry_redemption)
        self._retry_task.clock = self._clock
        self._retry_task.start(
            RETRY_INTERVAL.total_seconds(),
            now=False,
        )

    def _retry_redemption(self):
        # dict views don't support "+" on Python 3; snapshot both key sets
        # (this also avoids mutating the mappings while iterating).
        for voucher in list(self._error) + list(self._unpaid):
            if voucher in self._active:
                continue
            if self.get_voucher(voucher).state.should_start_redemption():
                self.redeem(voucher)

    def _check_pending_vouchers(self):
        """
        Find vouchers in the voucher store that need to be redeemed and try to
        redeem them.
        """
        vouchers = self.store.list()
        for voucher in vouchers:
            if voucher.state.should_start_redemption():
                self._log.info(
                    "Controller found voucher ({voucher}) at startup that needs redemption.",
                    voucher=voucher.number,
                )
                self.redeem(voucher.number)
            else:
                self._log.info(
                    "Controller found voucher ({voucher}) at startup that does not need redemption.",
                    voucher=voucher.number,
                )

    def _get_random_tokens_for_voucher(self, voucher, counter, num_tokens,
                                       total_tokens):
        """
        Generate or load random tokens for a redemption attempt of a voucher.

        :param int num_tokens: The number of tokens to get.

        :param int total_tokens: The total number of tokens for which this
            voucher is expected to be redeemed.
        """
        def get_tokens():
            self._log.info(
                "Generating random tokens for a voucher ({voucher}).",
                voucher=voucher,
            )
            return self.redeemer.random_tokens_for_voucher(
                Voucher(
                    number=voucher,
                    # Unclear whether this information is useful to redeemers
                    # but we cannot construct a Voucher without some value
                    # here.
                    expected_tokens=total_tokens,
                ),
                counter,
                num_tokens,
            )

        return self.store.add(
            voucher,
            total_tokens,
            counter,
            get_tokens,
        )

    @inlineCallbacks
    def redeem(self, voucher, num_tokens=None):
        """
        :param unicode voucher: A voucher to redeem.

        :param int num_tokens: A number of tokens to redeem.
        """
        # Try to get an existing voucher object for the given number.
        try:
            voucher_obj = self.store.get(voucher)
        except KeyError:
            # This is our first time dealing with this number.
            counter_start = 0
            if num_tokens is None:
                num_tokens = self.default_token_count
        else:
            num_tokens = voucher_obj.expected_tokens
            # Determine the starting point from the state.
            if voucher_obj.state.should_start_redemption():
                counter_start = voucher_obj.state.counter
            else:
                raise ValueError(
                    "Cannot redeem voucher in state {}.".format(voucher_obj.state),
                )

        self._log.info(
            "Starting redemption of {voucher}[{start}..{end}] for {num_tokens} tokens.",
            voucher=voucher,
            start=counter_start,
            end=self.num_redemption_groups,
            num_tokens=num_tokens,
        )
        for counter in range(counter_start, self.num_redemption_groups):
            # Pre-generate the random tokens to use when redeeming the voucher.
            # These are persisted with the voucher so the redemption can be made
            # idempotent.  We don't want to lose the value if we fail after the
            # server deems the voucher redeemed but before we persist the result.
            # With a stable set of tokens, we can re-submit them and the server
            # can re-sign them without fear of issuing excess passes.  Whether the
            # server signs a given set of random tokens once or many times, the
            # number of passes that can be constructed is still only the size of
            # the set of random tokens.
            token_count = token_count_for_group(self.num_redemption_groups,
                                                num_tokens, counter)
            tokens = self._get_random_tokens_for_voucher(
                voucher,
                counter,
                num_tokens=token_count,
                total_tokens=num_tokens,
            )

            # Reload state before each iteration.  We expect it to change each time.
            voucher_obj = self.store.get(voucher)

            succeeded = yield self._perform_redeem(voucher_obj, counter,
                                                   tokens)
            if not succeeded:
                self._log.info(
                    "Temporarily suspending redemption of {voucher} after non-success result.",
                    voucher=voucher,
                )
                break

    def _perform_redeem(self, voucher, counter, random_tokens):
        """
        Use the redeemer to redeem the given voucher and random tokens.

        This will not persist the voucher or random tokens but it will persist
        the result.

        :return Deferred[bool]: A ``Deferred`` firing with ``True`` if and
            only if redemption succeeds.
        """
        if not isinstance(voucher.state, model_Pending):
            raise ValueError(
                "Cannot redeem voucher in state {} instead of Pending.".format(
                    voucher.state),
            )

        # Ask the redeemer to do the real task of redemption.
        self._log.info("Redeeming random tokens for a voucher ({voucher}).",
                       voucher=voucher)
        d = bracket(
            lambda: setitem(
                self._active,
                voucher.number,
                model_Redeeming(
                    started=self.store.now(),
                    counter=voucher.state.counter,
                ),
            ),
            lambda: delitem(self._active, voucher.number),
            lambda: self.redeemer.redeemWithCounter(voucher, counter,
                                                    random_tokens),
        )
        d.addCallbacks(
            partial(self._redeem_success, voucher.number, counter),
            partial(self._redeem_failure, voucher.number),
        )
        d.addErrback(partial(self._final_redeem_error, voucher.number))
        return d

    def _redeem_success(self, voucher, counter, result):
        """
        Update the database state to reflect that a voucher was redeemed and to
        store the resulting unblinded tokens (which can be used to construct
        passes later).
        """
        self._log.info(
            "Inserting redeemed unblinded tokens for a voucher ({voucher}).",
            voucher=voucher,
        )
        self.store.insert_unblinded_tokens_for_voucher(
            voucher,
            result.public_key,
            result.unblinded_tokens,
            completed=(counter + 1 == self.num_redemption_groups),
        )
        return True

    def _redeem_failure(self, voucher, reason):
        if reason.check(AlreadySpent):
            self._log.error(
                "Voucher {voucher} reported as already spent during redemption.",
                voucher=voucher,
            )
            self.store.mark_voucher_double_spent(voucher)
        elif reason.check(Unpaid):
            self._log.error(
                "Voucher {voucher} reported as not paid for during redemption.",
                voucher=voucher,
            )
            self._unpaid[voucher] = self.store.now()
        else:
            self._log.error(
                "Redeeming random tokens for a voucher ({voucher}) failed: {reason!r}",
                reason=reason.value,
                voucher=voucher,
            )
            self._error[voucher] = model_Error(
                finished=self.store.now(),
                details=reason.getErrorMessage().decode("utf-8", "replace"),
            )
        return False

    def _final_redeem_error(self, voucher, reason):
        self._log.failure(
            "Redeeming random tokens for a voucher ({voucher}) encountered error.",
            reason,
            voucher=voucher,
        )
        return False

    def get_voucher(self, number):
        return self.incorporate_transient_state(self.store.get(number))

    def incorporate_transient_state(self, voucher):
        """
        Create a new ``Voucher`` which represents the given voucher but which also
        incorporates relevant transient state known to the controller.  For
        example, if a redemption attempt is currently in progress, this is
        incorporated.
        """
        if isinstance(voucher.state, model_Pending):
            if voucher.number in self._active:
                return attr.evolve(
                    voucher,
                    state=self._active[voucher.number],
                )
            if voucher.number in self._unpaid:
                return attr.evolve(
                    voucher,
                    state=model_Unpaid(finished=self._unpaid[voucher.number]),
                )
            if voucher.number in self._error:
                return attr.evolve(
                    voucher,
                    state=self._error[voucher.number],
                )
        return voucher
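
For reference, token_count_for_group (used by redeem above) only needs to
split the total evenly across the redemption groups. A plausible sketch, not
necessarily this project's actual implementation:

def token_count_for_group(num_groups, total_tokens, counter):
    # Spread total_tokens as evenly as possible across num_groups; the
    # first (total_tokens % num_groups) groups each get one extra token.
    quotient, remainder = divmod(total_tokens, num_groups)
    return quotient + (1 if counter < remainder else 0)

# e.g. 100 tokens in 16 groups: groups 0-3 get 7 tokens, groups 4-15 get 6.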
Example #53
    def mergeDictionaries(sourceDictionary, destinationDictionary):
        """
        Deep merge dictionaries recursively.

        :param sourceDictionary: <dict> first dictionary with data
        :param destinationDictionary: <dict> second dictionary with data
        :return: <dict> merged dictionary
        """
        log = Logger()
        varNamePattern = re.compile(
            r"^((__((ENV)|(FILE))__[A-Z]{3,})|(__((ENV)|(FILE))))__(?P<name>.*)$"
        )
        varTypePattern = re.compile(
            r"^__((ENV)|(FILE))__(?P<type>[A-Z]{3,})__(.*)$")

        for key, value in sourceDictionary.items():
            # ignoring comments
            if key == "//":
                continue

            if isinstance(value, dict):
                # get node or create one
                node = destinationDictionary.setdefault(key, {})
                Config.mergeDictionaries(value, node)
            elif isinstance(value, str) and (value.startswith("__ENV__")
                                             or value.startswith("__FILE__")):
                # extracting environment variable name
                nameMatch = varNamePattern.match(value)
                if nameMatch is None:
                    log.warn(
                        "Invalid environmental variable specified: {name}",
                        name=value)
                    continue
                envVariableName = nameMatch.group("name")

                # checking if environment variable is set
                if envVariableName not in os.environ:
                    log.warn("No environment variable {name} is set.",
                             name=envVariableName)
                    continue

                if value.startswith(
                        "__ENV__"
                ):  # checking if value is set in the environment variable
                    # checking if variable has a defined cast type
                    typeMatch = varTypePattern.match(value)
                    if typeMatch is not None:
                        envVariableType = typeMatch.group("type")

                        # casting value to the specified type
                        if envVariableType == "STR":
                            destinationDictionary[key] = str(
                                os.environ[envVariableName])
                        elif envVariableType == "BOOL":
                            if os.environ[envVariableName] == "1":
                                destinationDictionary[key] = True
                            elif os.environ[envVariableName] == "0":
                                destinationDictionary[key] = False
                        elif envVariableType == "INT":
                            destinationDictionary[key] = int(
                                os.environ[envVariableName])
                        elif envVariableType == "FLOAT":
                            destinationDictionary[key] = float(
                                os.environ[envVariableName])
                        elif envVariableType == "JSON":
                            try:
                                destinationDictionary[key] = json.loads(
                                    os.environ[envVariableName])
                            except Exception:
                                log.warn(
                                    "Environment variable {name} contains an invalid JSON value.",
                                    name=envVariableName)
                        else:
                            log.warn(
                                "Unsupported type {type} specified for variable {name}.",
                                name=envVariableName,
                                type=envVariableType)
                            continue
                    else:
                        destinationDictionary[key] = os.environ[
                            envVariableName]
                elif value.startswith(
                        "__FILE__"):  # checking if value is set in a file
                    filePath = os.environ[envVariableName]

                    # checking if file exists
                    if not os.path.isfile(filePath):
                        log.warn(
                            "File {filePath} does not exist.",
                            filePath=filePath,
                        )
                        continue

                    # checking if file can be read
                    if not os.access(filePath, os.R_OK):
                        log.warn(
                            "File {filePath} cannot be read.",
                            filePath=filePath,
                        )
                        continue

                    # load file contents
                    with open(filePath, "r") as filePointer:
                        destinationDictionary[key] = filePointer.read().strip()
            elif isinstance(value, str) and value.startswith("__FILE__"):
                # NOTE: unreachable -- "__FILE__" values are already handled
                # by the combined "__ENV__"/"__FILE__" branch above.
                pass
            else:
                destinationDictionary[key] = value

        return destinationDictionary
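
A small usage sketch of the substitution rules implemented above (the Config
class name and the environment variable names are assumed for illustration):

import os

os.environ["APP_HOST"] = "example.org"
os.environ["APP_PORT"] = "8080"

source = {
    "//": "keys named // are skipped as comments",
    "server": {
        "host": "__ENV__APP_HOST",       # plain string substitution
        "port": "__ENV__INT__APP_PORT",  # cast to int
    },
}
merged = Config.mergeDictionaries(source, {})
# merged == {"server": {"host": "example.org", "port": 8080}}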
Example #54
class RistrettoRedeemer(object):
    """
    An ``IRedeemer`` which speaks the Ristretto-flavored PrivacyPass protocol
    described at
    https://docs.rs/challenge-bypass-ristretto/1.0.0-pre.0/challenge_bypass_ristretto/#cryptographic-protocol

    :ivar treq.client.HTTPClient _treq: An HTTP client to use to make calls to
        the issuer.

    :ivar URL _api_root: The root of the issuer HTTP API.
    """
    _log = Logger()

    _treq = attr.ib()
    _api_root = attr.ib(validator=attr.validators.instance_of(URL))

    @classmethod
    def make(cls, section_name, node_config, announcement, reactor):
        configured_issuer = node_config.get_config(
            section=section_name,
            option=u"ristretto-issuer-root-url",
        ).decode("ascii")
        if announcement is not None:
            # Don't let us talk to a storage server that has a different idea
            # about who issues ZKAPs.  We should lift this limitation (that is, we
            # should support as many different issuers as the user likes) in the
            # future but doing so requires changing how the web interface works
            # and possibly also the interface for voucher submission.
            #
            # If we aren't given an announcement then we're not being used in
            # the context of a specific storage server so the check is
            # unnecessary and impossible.
            announced_issuer = announcement[u"ristretto-issuer-root-url"]
            if announced_issuer != configured_issuer:
                raise IssuerConfigurationMismatch(announced_issuer,
                                                  configured_issuer)

        return cls(
            HTTPClient(Agent(reactor)),
            URL.from_text(configured_issuer),
        )

    def random_tokens_for_voucher(self, voucher, counter, count):
        return list(
            RandomToken(
                challenge_bypass_ristretto.RandomToken.create()
                .encode_base64().decode("ascii"),
            )
            for n in range(count)
        )

    @inlineCallbacks
    def redeemWithCounter(self, voucher, counter, encoded_random_tokens):
        random_tokens = list(
            challenge_bypass_ristretto.RandomToken.decode_base64(
                token.token_value.encode("ascii"))
            for token in encoded_random_tokens)
        blinded_tokens = list(token.blind() for token in random_tokens)
        response = yield self._treq.post(
            self._api_root.child(u"v1", u"redeem").to_text(),
            dumps({
                u"redeemVoucher": voucher.number,
                u"redeemCounter": counter,
                u"redeemTokens": list(
                    token.encode_base64() for token in blinded_tokens),
            }),
            headers={b"content-type": b"application/json"},
        )
        response_body = yield content(response)

        try:
            result = loads(response_body)
        except ValueError:
            raise UnexpectedResponse(response.code, response_body)

        success = result.get(u"success", False)
        if not success:
            reason = result.get(u"reason", None)
            if reason == u"double-spend":
                raise AlreadySpent(voucher)
            elif reason == u"unpaid":
                raise Unpaid(voucher)

        self._log.info(
            "Redeemed: {public_key} {proof} {count}",
            public_key=result[u"public-key"],
            proof=result[u"proof"],
            count=len(result[u"signatures"]),
        )

        marshaled_signed_tokens = result[u"signatures"]
        marshaled_proof = result[u"proof"]
        marshaled_public_key = result[u"public-key"]

        public_key = challenge_bypass_ristretto.PublicKey.decode_base64(
            marshaled_public_key.encode("ascii"), )
        self._log.info("Decoded public key")
        clients_signed_tokens = list(
            challenge_bypass_ristretto.SignedToken.decode_base64(
                marshaled_signed_token.encode("ascii"), )
            for marshaled_signed_token in marshaled_signed_tokens)
        self._log.info("Decoded signed tokens")
        clients_proof = challenge_bypass_ristretto.BatchDLEQProof.decode_base64(
            marshaled_proof.encode("ascii"), )
        with less_limited_stack():
            self._log.info("Decoded batch proof")
            clients_unblinded_tokens = clients_proof.invalid_or_unblind(
                random_tokens,
                blinded_tokens,
                clients_signed_tokens,
                public_key,
            )
        self._log.info("Validated proof")
        unblinded_tokens = list(
            UnblindedToken(token.encode_base64().decode("ascii"))
            for token in clients_unblinded_tokens)
        returnValue(RedemptionResult(
            unblinded_tokens,
            marshaled_public_key,
        ))

    def tokens_to_passes(self, message, unblinded_tokens):
        assert isinstance(message, bytes)
        assert isinstance(unblinded_tokens, list)
        assert all(
            isinstance(element, UnblindedToken)
            for element in unblinded_tokens)
        unblinded_tokens = list(
            challenge_bypass_ristretto.UnblindedToken.decode_base64(
                token.unblinded_token.encode("ascii"))
            for token in unblinded_tokens)
        clients_verification_keys = list(
            token.derive_verification_key_sha512()
            for token in unblinded_tokens)
        clients_signatures = list(
            verification_key.sign_sha512(message)
            for verification_key in clients_verification_keys)
        clients_preimages = list(token.preimage()
                                 for token in unblinded_tokens)
        passes = list(
            Pass(
                preimage.encode_base64().decode("ascii"),
                signature.encode_base64().decode("ascii"),
            ) for (preimage,
                   signature) in zip(clients_preimages, clients_signatures))
        return passes
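
Taken together, redeemWithCounter defines the wire format exchanged with the
issuer. Schematically (field values are placeholders, not real data):

# Request body POSTed to <api_root>/v1/redeem:
request = {
    "redeemVoucher": "<voucher number>",
    "redeemCounter": 0,
    "redeemTokens": ["<base64 blinded token>", "..."],
}

# Successful response fields consumed by the client:
response = {
    "success": True,
    "public-key": "<base64 issuer public key>",
    "proof": "<base64 batch DLEQ proof>",
    "signatures": ["<base64 signed token>", "..."],
}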
Example #55
from collections import defaultdict

from twisted.internet import defer
from twisted.logger import Logger

from leap.common.check import leap_assert_type
from leap.common.events import emit_async, catalog
from leap.bitmask.mail.adaptors.soledad import SoledadMailAdaptor
from leap.bitmask.mail.constants import INBOX_NAME
from leap.bitmask.mail.constants import MessageFlags
from leap.bitmask.mail.mailbox_indexer import MailboxIndexer
from leap.bitmask.mail.plugins import soledad_sync_hooks
from leap.bitmask.mail.utils import find_charset, CaseInsensitiveDict
from leap.bitmask.mail.utils import lowerdict

log = Logger()

# TODO LIST
# [ ] Probably change the name of this module to "api" or "account", mail is
#     too generic (there's also IncomingMail and OutgoingMail).
# [ ] Profile add_msg.


def _get_mdoc_id(mbox, chash):
    """
    Get the doc_id for the metamsg document.
    """
    return "M+{mbox}+{chash}".format(mbox=mbox, chash=chash)


def _write_and_rewind(payload):
Example #56
class NatsProtocol(Protocol):
    server_settings = None
    log = Logger()

    def __init__(self,
                 own_reactor=None,
                 verbose=True,
                 pedantic=False,
                 ssl_required=False,
                 auth_token=None,
                 user="",
                 password="",
                 on_msg=None,
                 on_connect=None,
                 event_subscribers=None,
                 subscriptions=None,
                 unsubs=None):
        """

        @param own_reactor: A Twisted Reactor, defaults to standard. Chiefly
         customizable for testing.
        @param verbose: Turns on +OK protocol acknowledgements.
        @param pedantic: Turns on additional strict format checking, e.g.
         for properly formed subjects
        @param ssl_required: Indicates whether the client requires
         an SSL connection.
        @param auth_token: Client authorization token
        @param user: Connection username (if auth_required is set)
        @param password: Connection password (if auth_required is set)
        @param on_msg: Handler for messages for subscriptions that don't have
         their own on_msg handler. Default behavior is to write to stdout.
         A callable that takes the following params:
             @param nats_protocol: An instance of NatsProtocol.
             @param sid: A unique alphanumeric subscription ID.
             @param subject: A valid NATS subject.
             @param reply_to: The reply to.
             @param payload: Bytes of the payload.
        @param on_connect: Callable that takes this instance of NatsProtocol
         which will be called upon the first successful connection.
        @param event_subscribers: A collection of functions that take an 
         event/action entity.
        @param subscriptions: A dict of sids and SubscriptionArgs.
        @param unsubs: A dict of sids and ints representing the number 
         of messages for the sid before automatic unsubscription.
        """
        self.reactor = own_reactor if own_reactor else reactor
        self.status = DISCONNECTED
        self.verbose = verbose
        # Set the number of PING sent out
        self.ping_loop = LoopingCall(self.ping)
        self.pout = 0
        self.remaining_bytes = b''

        self.client_info = ClientInfo(verbose, pedantic, ssl_required,
                                      auth_token, user, password, CLIENT_NAME,
                                      LANG, _meta.version)

        if on_msg:
            # Ensure the on_msg signature fits.
            on_msg(nats_protocol=self,
                   sid="0",
                   subject="testSubject",
                   reply_to='inBox',
                   payload=b'hello, world')
        self.on_msg = on_msg
        self.on_connect = on_connect
        self.on_connect_d = defer.Deferred()
        if on_connect:
            self.on_connect_d.addCallback(on_connect)
            self.on_connect_d.addErrback(self._eb_trace_and_raise)
        self.sids = {}
        self.subscriptions = subscriptions if subscriptions is not None else {}
        self.unsubs = unsubs if unsubs else {}
        self.event_subscribers = event_subscribers if event_subscribers is not None else []

    def __repr__(self):
        return r'<NatsProtocol connected={} server_info={}>'.format(
            self.status, self.server_settings)

    def _eb_trace_and_raise(self, failure):
        failure.printTraceback()
        failure.raiseException()

    def dispatch(self, event):
        """Call each event subscriber with the event.
        """
        if self.event_subscribers is None:
            return
        for event_subscriber in self.event_subscribers:
            event_subscriber(event)
        return

    def connectionLost(self, reason=connectionDone):
        """Called when the connection is shut down.

        Clear any circular references here, and any external references
        to this Protocol.  The connection has been closed.

        Clear left over remaining bytes because they won't be continued
        properly upon reconnection.

        @type reason: L{twisted.python.failure.Failure}
        """
        self.status = DISCONNECTED
        self.remaining_bytes = b''
        if reason.check(error.ConnectionLost):
            self.dispatch(actions.ConnectionLost(self, reason=reason))
        else:
            self.dispatch(actions.Disconnected(self, reason=reason))

    def dataReceived(self, data):
        """
        Parse the NATS.io protocol from chunks of data streaming from
        the connected gnatsd.

        The server settings will be set and connect will be sent with this
        client's info upon an INFO, which should happen when the
        transport connects.

        Registered message callback functions will be called with MSGs
        once parsed.

        PONG will be called upon a ping.

        An exception will be raised upon an ERR from gnatsd.

        A +OK is ignored.
        """
        if self.remaining_bytes:
            data = self.remaining_bytes + data
            self.remaining_bytes = b''

        data_buf = BufferedReader(BytesIO(data))
        while True:
            command = data_buf.read(4)
            if command == b"-ERR":
                raise NatsError(data_buf.read())
            elif command == b"+OK\r":
                val = data_buf.read(1)
                if val != b'\n':
                    self.remaining_bytes += command
                    break
            elif command == b"MSG ":
                val = data_buf.readline()
                if not val:
                    self.remaining_bytes += command
                    break
                if not val.endswith(b'\r\n'):
                    self.remaining_bytes += command + val
                    break

                meta_data = val.split(b" ")
                n_bytes = int(meta_data[-1])
                subject = meta_data[0].decode()
                if len(meta_data) == 4:
                    reply_to = meta_data[2].decode()
                elif len(meta_data) == 3:
                    reply_to = None
                else:
                    self.remaining_bytes += command + val
                    break

                sid = meta_data[1].decode()

                if sid in self.sids:
                    on_msg = self.sids[sid]
                else:
                    on_msg = self.on_msg

                payload = data_buf.read(n_bytes)
                if len(payload) != n_bytes:
                    self.remaining_bytes += command + val + payload
                    break

                if on_msg:
                    on_msg(nats_protocol=self,
                           sid=sid,
                           subject=subject,
                           reply_to=reply_to,
                           payload=payload)
                else:
                    stdout.write(command.decode())
                    stdout.write(val.decode())
                    stdout.write(payload.decode())

                self.dispatch(
                    actions.ReceivedMsg(sid,
                                        self,
                                        subject=subject,
                                        payload=payload,
                                        reply_to=reply_to))

                if sid in self.unsubs:
                    self.unsubs[sid] -= 1
                    if self.unsubs[sid] <= 0:
                        self._forget_subscription(sid)
                        self.dispatch(
                            actions.UnsubMaxReached(sid, protocol=self))

                payload_post = data_buf.readline()
                if payload_post != b'\r\n':
                    self.remaining_bytes += (command + val + payload +
                                             payload_post)
                    break
            elif command == b"PING":
                self.dispatch(actions.ReceivedPing(self))
                self.pong()
                val = data_buf.readline()
                if val != b'\r\n':
                    self.remaining_bytes += command + val
                    break
            elif command == b"PONG":
                self.pout -= 1
                self.dispatch(
                    actions.ReceivedPong(self, outstanding_pings=self.pout))
                val = data_buf.readline()
                if val != b'\r\n':
                    self.remaining_bytes += command + val
                    break
            elif command == b"INFO":
                val = data_buf.readline()
                if not val.endswith(b'\r\n'):
                    self.remaining_bytes += command + val
                    break
                settings = json.loads(val.decode('utf8'))
                self.server_settings = ServerInfo(**settings)
                self.dispatch(
                    actions.ReceivedInfo(self,
                                         server_info=self.server_settings))
                self.status = CONNECTED
                self.pout = 0
                self.sids = {}
                self.connect()
                if self.on_connect_d:
                    self.on_connect_d.callback(self)
                    self.on_connect_d = None
            else:
                self.dispatch(actions.UnhandledCommand(self, command=command))
                val = data_buf.read()
                self.remaining_bytes += command + val
            if not data_buf.peek(1):
                break

    def connect(self):
        """
        Tell the NATS server about this client and its options.
        """
        action = actions.SendConnect(self, client_info=self.client_info)
        payload = 'CONNECT {}\r\n'.format(
            json.dumps(self.client_info.asdict_for_connect(),
                       separators=(',', ':')))

        self.transport.write(payload.encode())
        self.dispatch(action)

    def pub(self, subject, payload, reply_to=None):
        """
        Publish a payload of bytes to a subject.

        @param subject: The destination subject to publish to.
        @param reply_to: The reply inbox subject that subscribers can use
         to send a response back to the publisher/requestor.
        @param payload: The message payload data, in bytes.
        """
        action = actions.SendPub(self, subject, payload, reply_to)
        reply_part = ""
        if reply_to:
            reply_part = "{} ".format(reply_to)

        # TODO: deal with the payload if it is bigger than the server max.
        op = "PUB {} {}{}\r\n".format(subject, reply_part,
                                      len(payload)).encode()
        op += payload + b'\r\n'
        self.transport.write(op)
        self.dispatch(action)

    def apply_subscriptions(self):
        """
        Subscribe all the subscriptions and unsubscribe all of 
        the unsubscriptions.

        Builds the state of subscriptions and unsubscriptions 
        with max messages.
        """
        if self.status == CONNECTED:
            for sid, sub_args in self.subscriptions.items():
                self.sub(sub_args.subject, sub_args.sid, sub_args.queue_group,
                         sub_args.on_msg)
                if sid in self.unsubs:
                    self.unsub(sid, max_msgs=self.unsubs[sid])

    def sub(self, subject, sid, queue_group=None, on_msg=None):
        """
        Subscribe to a subject.

        @param subject: The subject name to subscribe to.
        @param sid: A unique alphanumeric subscription ID.
        @param queue_group: If specified, the subscriber will
         join this queue group.
         @param on_msg: A callable that takes the following params:
             @param nats_protocol: An instance of NatsProtocol.
             @param sid: A unique alphanumeric subscription ID.
             @param subject: A valid NATS subject.
             @param reply_to: The reply to.
             @param payload: Bytes of the payload.
        """
        self.sids[sid] = on_msg
        self.subscriptions[sid] = SubscriptionArgs(subject, sid, queue_group,
                                                   on_msg)
        self.dispatch(
            actions.SendSub(sid=sid,
                            protocol=self,
                            subject=subject,
                            queue_group=queue_group,
                            on_msg=on_msg))

        queue_group_part = ""
        if queue_group:
            queue_group_part = "{} ".format(queue_group)

        op = "SUB {} {}{}\r\n".format(subject, queue_group_part, sid)
        self.transport.write(op.encode('utf8'))

    def _forget_subscription(self, sid):
        """Undeclare a subscription. Any on_msg declared for the subscription 
        will no longer be called.
        If a apply_subscriptions is called,
        which it is during a reconnect, These subscriptions will not be 
        established. """
        if sid in self.unsubs:
            del self.unsubs[sid]
        if sid in self.subscriptions:
            del self.subscriptions[sid]
        if sid in self.sids:
            del self.sids[sid]

    def unsub(self, sid, max_msgs=None):
        """
        Unsubscribes the connection from the specified subject, or
        auto-unsubscribes after the specified number of messages has
        been received.

        @param sid: The unique alphanumeric subscription ID of
         the subject to unsubscribe from.
        @type sid: str
        @param max_msgs: Optional number of messages to wait for before
         automatically unsubscribing.
        @type max_msgs: int
        """
        action = actions.SendUnsub(sid=sid, protocol=self, max_msgs=max_msgs)
        max_msgs_part = ""
        if max_msgs:
            max_msgs_part = "{}".format(max_msgs)
            self.unsubs[sid] = max_msgs
        else:
            self._forget_subscription(sid)

        op = "UNSUB {} {}\r\n".format(sid, max_msgs_part)
        self.transport.write(op.encode('utf8'))
        self.dispatch(action)

    def ping(self):
        """
        Send ping.
        """
        op = b"PING\r\n"
        self.transport.write(op)
        self.pout += 1
        self.dispatch(actions.SendPing(self, outstanding_pings=self.pout))

    def pong(self):
        """
        Send pong.
        """
        op = b"PONG\r\n"
        self.transport.write(op)
        self.dispatch(actions.SendPong(self))

    def request(self, sid, subject):
        """
        Make a synchronous request for a subject.

        Make a reply to.
        Subscribe to the subject.
        Make a Deferred and add it to the inbox under the reply to.
        Do auto unsubscribe for one message.
        """
        raise NotImplementedError()
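
A minimal connection sketch for the protocol above (the server address,
subject, and handlers are assumptions for illustration):

from twisted.internet import reactor
from twisted.internet.endpoints import TCP4ClientEndpoint, connectProtocol

def print_msg(nats_protocol, sid, subject, reply_to, payload):
    # Matches the on_msg signature documented in __init__.
    print(subject, payload)

def on_connect(protocol):
    # Runs once the INFO handshake has completed.
    protocol.sub("updates.>", "1", on_msg=print_msg)

endpoint = TCP4ClientEndpoint(reactor, "demo.nats.io", 4222)
connectProtocol(endpoint, NatsProtocol(on_connect=on_connect))
reactor.run()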
Example #57
class Receiver(gr.hier_block2, ExportedState):
    # TODO: plumb this in from top so we can start giving error messages
    # to the client e.g. in the "unknown mode" case.
    __log = Logger()

    def __init__(self,
                 mode,
                 freq_absolute=100.0,
                 freq_relative=None,
                 freq_linked_to_device=False,
                 audio_destination=None,
                 device_name=None,
                 audio_gain=-6,
                 audio_pan=0,
                 audio_channels=0,
                 context=None):
        assert audio_channels == 1 or audio_channels == 2
        assert audio_destination is not None
        assert device_name is not None
        gr.hier_block2.__init__(
            self,
            type(self).__name__, gr.io_signature(1, 1, gr.sizeof_gr_complex),
            gr.io_signature(1, 1, gr.sizeof_float * audio_channels))

        if lookup_mode(mode) is None:
            # TODO: communicate back to client if applicable
            self.__log.error('Unknown mode {mode!r} in Receiver(); using AM',
                             mode=mode)
            mode = 'AM'

        # Provided by caller
        self.context = context
        self.__audio_channels = audio_channels

        # cached info from device
        self.__device_name = device_name

        # Simple state
        self.mode = mode
        self.audio_gain = audio_gain
        self.audio_pan = min(1, max(-1, audio_pan))
        self.__audio_destination = audio_destination

        # Receive frequency.
        self.__freq_linked_to_device = bool(freq_linked_to_device)
        if self.__freq_linked_to_device and freq_relative is not None:
            self.__freq_relative = float(freq_relative)
            self.__freq_absolute = (self.__freq_relative +
                                    self.__get_device().get_freq())
        else:
            self.__freq_absolute = float(freq_absolute)
            self.__freq_relative = (self.__freq_absolute -
                                    self.__get_device().get_freq())

        # Blocks
        self.__rotator = blocks.rotator_cc()
        self.__demodulator = self.__make_demodulator(mode, {})
        self.__update_demodulator_info()
        self.__audio_gain_block = blocks.multiply_const_vff([0.0] *
                                                            audio_channels)
        self.probe_audio = analog.probe_avg_mag_sqrd_f(
            0, alpha=10.0 / 44100)  # TODO adapt to output audio rate

        # Other internals
        self.__last_output_type = None

        # initialize rotator, also in case of __demod_tunable
        self.__update_rotator()
        self.__update_audio_gain()
        self.__do_connect(reason=u'initialization')

    def __update_demodulator_info(self):
        self.__demod_tunable = ITunableDemodulator.providedBy(
            self.__demodulator)
        output_type = self.__demodulator.get_output_type()
        assert isinstance(output_type, SignalType)
        assert output_type.get_kind() in ('STEREO', 'MONO', 'NONE')
        self.__demod_output = output_type.get_kind() != 'NONE'
        self.__demod_stereo = output_type.get_kind() == 'STEREO'
        if self.__demod_output:
            self.__output_type = SignalType(
                kind='STEREO', sample_rate=output_type.get_sample_rate())
        else:
            self.__output_type = no_signal

    def __do_connect(self, reason):
        self.__log.debug('receiver do_connect: {reason}', reason=reason)
        self.context.lock()
        try:
            self.disconnect_all()

            # Connect input of demodulator
            if self.__demod_tunable:
                self.connect(self, self.__demodulator)
            else:
                self.connect(self, self.__rotator, self.__demodulator)

            if self.__demod_output:
                # Construct stereo-to-mono conversion (used at least for level probe)
                if self.__demod_stereo:
                    splitter = blocks.vector_to_streams(gr.sizeof_float, 2)
                    mono_audio = blocks.multiply_matrix_ff(((0.5, 0.5), ))
                    self.connect(self.__demodulator, splitter)
                    self.connect((splitter, 0), (mono_audio, 0))
                    self.connect((splitter, 1), (mono_audio, 1))
                else:
                    mono_audio = self.__demodulator

                # Connect mono audio to level probe
                self.connect(mono_audio, self.probe_audio)

                # Connect demodulator to output gain control, converting as needed
                if (self.__audio_channels == 2) == self.__demod_stereo:
                    # stereo to stereo or mono to mono
                    self.connect(self.__demodulator, self.__audio_gain_block)
                elif self.__audio_channels == 2 and not self.__demod_stereo:
                    # mono to stereo
                    duplicator = blocks.streams_to_vector(gr.sizeof_float, 2)
                    self.connect(self.__demodulator, (duplicator, 0))
                    self.connect(self.__demodulator, (duplicator, 1))
                    self.connect(duplicator, self.__audio_gain_block)
                elif self.__audio_channels == 1 and self.__demod_stereo:
                    # stereo to mono
                    self.connect(mono_audio, self.__audio_gain_block)
                else:
                    raise Exception('shouldn\'t happen')

                # Connect gain control to output of receiver
                self.connect(self.__audio_gain_block, self)
            else:
                # Dummy output, ignored by containing block
                self.connect(
                    blocks.vector_source_f([], vlen=self.__audio_channels),
                    self)

            if self.__output_type != self.__last_output_type:
                self.__last_output_type = self.__output_type
                self.context.changed_needed_connections(u'changed output type')
        finally:
            self.context.unlock()

    def get_output_type(self):
        return self.__output_type

    def changed_device_freq(self):
        if self.__freq_linked_to_device:
            self.__freq_absolute = (self.__freq_relative +
                                    self.__get_device().get_freq())
        else:
            self.__freq_relative = (self.__freq_absolute -
                                    self.__get_device().get_freq())
        self.__update_rotator()
        # note does not revalidate() because the caller will handle that
        self.state_changed('rec_freq')
        self.state_changed('is_valid')

    @exported_value(type=ReferenceT(), changes='explicit')
    def get_demodulator(self):
        return self.__demodulator

    @exported_value(type_fn=lambda self: self.context.get_rx_device_type(),
                    changes='this_setter',
                    label='RF source')
    def get_device_name(self):
        return self.__device_name

    @setter
    def set_device_name(self, value):
        value = self.context.get_rx_device_type()(value)
        if self.__device_name != value:
            self.__device_name = value
            self.changed_device_freq()  # freq
            self._rebuild_demodulator(
                reason=u'changed device, thus maybe sample rate')  # rate
            self.context.changed_needed_connections(u'changed device')

    # type construction is deferred because we don't want loading this file to trigger loading plugins
    @exported_value(
        type_fn=lambda self: EnumT({d.mode: d.info
                                    for d in get_modes()}),
        changes='this_setter',
        label='Mode')
    def get_mode(self):
        return self.mode

    @setter
    def set_mode(self, mode):
        mode = six.text_type(mode)
        if mode == self.mode: return
        if self.__demodulator and \
                IDemodulatorModeChange.providedBy(self.__demodulator) and \
                self.__demodulator.can_set_mode(mode):
            self.__demodulator.set_mode(mode)
            self.mode = mode
        else:
            self._rebuild_demodulator(mode=mode, reason=u'changed mode')

    # TODO: rename rec_freq to just freq
    @exported_value(type=QuantityT(units.Hz),
                    parameter='freq_absolute',
                    changes='explicit',
                    label='Frequency')
    def get_rec_freq(self):
        return self.__freq_absolute

    @setter
    def set_rec_freq(self, absolute):
        absolute = float(absolute)

        if self.__freq_linked_to_device:
            # Temporarily violating the (device freq + relative freq = absolute freq) invariant, which will be restored below by changing the device freq.
            self.__freq_absolute = absolute
        else:
            self.__freq_absolute = absolute
            self.__freq_relative = absolute - self.__get_device().get_freq()

        self.__update_rotator()

        if self.__freq_linked_to_device:
            # TODO: reconsider whether we should be giving commands directly to the device, vs. going through the context.
            self.__get_device().set_freq(self.__freq_absolute -
                                         self.__freq_relative)
        else:
            self.context.revalidate(tuning=True)
        self.state_changed('rec_freq')
        self.state_changed('is_valid')

    @exported_value(
        type=bool,
        changes='this_setter',
        label='Follow device',
        description=
        'When this receiver\'s frequency or the device\'s frequency is changed, maintain the relative offset between them.'
    )
    def get_freq_linked_to_device(self):
        return self.__freq_linked_to_device

    @setter
    def set_freq_linked_to_device(self, value):
        self.__freq_linked_to_device = bool(value)

    # TODO: support non-audio demodulators at which point these controls should be optional
    @exported_value(parameter='audio_gain',
                    type=RangeT([(-30, 20)], unit=units.dB, strict=False),
                    changes='this_setter',
                    label='Volume')
    def get_audio_gain(self):
        return self.audio_gain

    @setter
    def set_audio_gain(self, value):
        self.audio_gain = value
        self.__update_audio_gain()

    @exported_value(type_fn=lambda self: RangeT(
        [(-1, 1)] if self.__audio_channels > 1 else [(0, 0)], strict=True),
                    changes='this_setter',
                    label='Pan')
    def get_audio_pan(self):
        return self.audio_pan

    @setter
    def set_audio_pan(self, value):
        self.audio_pan = value
        self.__update_audio_gain()

    @exported_value(
        type_fn=lambda self: self.context.get_audio_destination_type(),
        changes='this_setter',
        label='Audio destination')
    def get_audio_destination(self):
        return self.__audio_destination

    @setter
    def set_audio_destination(self, value):
        if self.__audio_destination != value:
            self.__audio_destination = value
            self.context.changed_needed_connections(u'changed destination')

    @exported_value(type=bool, changes='explicit')
    def get_is_valid(self):
        if self.__demodulator is None:
            return False
        half_sample_rate = (self.__get_device().get_rx_driver()
                            .get_output_type().get_sample_rate() / 2)
        demod_shape = self.__demodulator.get_band_shape()
        valid_bandwidth_lower = -half_sample_rate - self.__freq_relative
        valid_bandwidth_upper = half_sample_rate - self.__freq_relative
        return (valid_bandwidth_lower <= min(0, demod_shape.pass_low)
                and valid_bandwidth_upper >= max(0, demod_shape.pass_high))

    # Note that the receiver cannot measure RF power because we don't know what the channel bandwidth is; we have to leave that to the demodulator.
    # TODO: document what we are using as the reference level. It's not dBFS because we're floating-point and before the gain stage.
    @exported_value(type=RangeT([(_audio_power_minimum_dB, 0)],
                                unit=units.dB,
                                strict=False),
                    changes='continuous',
                    label='Audio power')
    def get_audio_power(self):
        if self.get_is_valid():
            return to_dB(
                max(_audio_power_minimum_amplitude, self.probe_audio.level()))
        else:
            # will not be receiving samples, so probe's value will be meaningless
            return _audio_power_minimum_dB

    def __update_rotator(self):
        device = self.__get_device()
        sample_rate = device.get_rx_driver().get_output_type().get_sample_rate()
        if self.__demod_tunable:
            # TODO: Method should perhaps be renamed to convey that it is relative
            self.__demodulator.set_rec_freq(self.__freq_relative)
        else:
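            # rotator_inc presumably converts the shift into a phase
            # increment of 2*pi*shift/rate radians per sample, mixing the
            # selected channel down to baseband.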
            self.__rotator.set_phase_inc(
                rotator_inc(rate=sample_rate, shift=-self.__freq_relative))

    def __get_device(self):
        return self.context.get_device(self.__device_name)

    # called from facet
    def _rebuild_demodulator(self, mode=None, reason='<unspecified>'):
        self.__rebuild_demodulator_nodirty(mode)
        self.__do_connect(reason=u'demodulator rebuilt: %s' % (reason, ))
        # TODO write a test showing that revalidate is needed and works
        self.context.revalidate(tuning=False)  # in case our bandwidth changed
        self.state_changed('is_valid')

    def __rebuild_demodulator_nodirty(self, mode=None):
        if self.__demodulator is None:
            defaults = {}
        else:
            defaults = self.__demodulator.state_to_json()
        if mode is None:
            mode = self.mode
        self.__demodulator = self.__make_demodulator(mode, defaults)
        self.__update_demodulator_info()
        self.__update_rotator()
        self.mode = mode
        self.state_changed('demodulator')

        # Replace blocks downstream of the demodulator so as to flush samples that are potentially at a different sample rate and would therefore be audibly wrong. Caller will handle reconnection.
        self.__audio_gain_block = blocks.multiply_const_vff(
            [0.0] * self.__audio_channels)
        self.__update_audio_gain()

    def __make_demodulator(self, mode, state):
        """Returns the demodulator."""

        t0 = time.time()

        mode_def = lookup_mode(mode)
        if mode_def is None:
            # TODO: Better handling, like maybe a dummy demod
            raise ValueError('Unknown mode: ' + mode)
        clas = mode_def.demod_class

        state = state.copy()  # don't modify arg
        if 'mode' in state:
            # don't switch back to the mode we just switched from
            del state['mode']

        facet = ContextForDemodulator(self)

        init_kwargs = dict(mode=mode,
                           input_rate=self.__get_device().get_rx_driver().
                           get_output_type().get_sample_rate(),
                           context=facet)
        demodulator = IDemodulator(
            unserialize_exported_state(ctor=clas,
                                       state=state,
                                       kwargs=init_kwargs))

        # until _enabled, ignore any callbacks resulting from unserialization calling setters
        facet._enabled = True
        self.__log.debug('Constructed {mode} demodulator: {time_ms} ms.',
                         mode=mode,
                         time_ms=(time.time() - t0) * 1000)
        return demodulator

    def __update_audio_gain(self):
        gain_lin = dB(self.audio_gain)
        if self.__audio_channels == 2:
            pan = self.audio_pan
            # TODO: Instead of left-to-left and right-to-right, panning other than center should mix left and right content. (A "pan law" defines the proper mix.) This implies a matrix multiplication type operation.
            self.__audio_gain_block.set_k([
                gain_lin * (1 - pan),
                gain_lin * (1 + pan),
            ])
        else:
            self.__audio_gain_block.set_k([gain_lin])
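
The TODO above notes that panning should really follow a pan law rather than
plain left/right scaling. A constant-power variant, as a sketch of what that
could look like (not this project's implementation):

import math

def constant_power_gains(gain_lin, pan):
    """Map pan in [-1, 1] to per-channel gains with constant total power."""
    theta = (pan + 1) * math.pi / 4  # -1 -> hard left, +1 -> hard right
    return [gain_lin * math.cos(theta), gain_lin * math.sin(theta)]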
Example #58
 def render_POST(self, txrequest):
     logger = Logger(namespace='- ListDbSchedule -')
     args = dict((k, v[0])
                 for k, v in native_stringify_dict(copy(txrequest.args),
                                                   keys_only=False).items())
     projects_need = set(eval(
         args.get('projects'))) if args.get('projects') else set()
     spiders_need = set(eval(
         args.get('spiders'))) if args.get('spiders') else set()
     sta = "ok"
     res = None
     try:
         lock.acquire()
         if database_type == 'mysql':
             res = database_connector.get_result(model=SpiderScheduleModel,
                                                 fields=[
                                                     'id', 'project',
                                                     'spider', 'schedule',
                                                     'args', 'runtime',
                                                     'status'
                                                 ])
         else:
             res = database_connector.get(model_name='SpiderScheduleModel',
                                          key_list=[
                                              'id', 'project', 'spider',
                                              'schedule', 'args', 'runtime',
                                              'status'
                                          ])
         lock.release()
     except Exception as E:
         logger.info(
             'something wrong when getting database datas: {}'.format(E))
         sta = 'Error'
     if res:
         db_schedules = [
             {
                 'id': x.id,
                 'project': x.project,
                 'spider': x.spider,
                 'schedule': x.schedule,
                 'args': x.args,
                 'runtime': x.runtime,
                 'status': x.status,
                 # 'create_time': x.create_time.strftime("%Y-%m-%d %H:%M:%S"),
                 # 'update_time': x.update_time.strftime("%Y-%m-%d %H:%M:%S"),
             } for x in res
         ]
         # Apply the spider and project filters independently; each one
         # narrows the result set only when the corresponding argument
         # was supplied.
         if spiders_need:
             db_schedules = [
                 x for x in db_schedules if x.get('spider') in spiders_need
             ]
         if projects_need:
             db_schedules = [
                 x for x in db_schedules
                 if x.get('project') in projects_need
             ]
     else:
         db_schedules = None
     total = len(db_schedules) if db_schedules else 0
     final = {
         "node_name": self.root.nodename,
         "total schedules": total,
         "status": sta,
         "database_schedules": db_schedules
     }
     return final
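
Since eval() on request arguments executes arbitrary Python, a safer variant of the argument parsing above could use ast.literal_eval. A minimal sketch, assuming the same "['name1', 'name2']"-style argument format; parse_name_set is a hypothetical helper, not part of the service above:

import ast

def parse_name_set(raw):
    """Parse an argument such as "['p1', 'p2']" into a set of names."""
    if not raw:
        return set()
    # literal_eval accepts only Python literals, so payloads like
    # "__import__('os').system(...)" raise ValueError instead of running.
    value = ast.literal_eval(raw)
    if not isinstance(value, (list, tuple, set)):
        raise ValueError('expected a sequence of names, got %r' % (value,))
    return set(value)

# projects_need = parse_name_set(args.get('projects'))
# spiders_need = parse_name_set(args.get('spiders'))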
Example #59
0
class Web3Client:

    is_local = False

    GETH = 'Geth'
    PARITY = 'Parity'
    ALT_PARITY = 'Parity-Ethereum'
    GANACHE = 'EthereumJS TestRPC'
    ETHEREUM_TESTER = 'EthereumTester'  # (PyEVM)
    SYNC_TIMEOUT_DURATION = 60  # seconds to wait for various blockchain syncing endeavors
    PEERING_TIMEOUT = 30
    SYNC_SLEEP_DURATION = 5

    class ConnectionNotEstablished(RuntimeError):
        pass

    class SyncTimeout(RuntimeError):
        pass

    def __init__(self, w3, node_technology: str, version: str, platform: str,
                 backend: str):

        self.w3 = w3
        self.node_technology = node_technology
        self.node_version = version
        self.platform = platform
        self.backend = backend
        self.log = Logger(self.__class__.__name__)

    @classmethod
    def _get_variant(cls, w3):
        return cls

    @classmethod
    def from_w3(cls, w3: Web3) -> 'Web3Client':
        """

        Client version strings
        ======================

        Geth    -> 'Geth/v1.4.11-stable-fed692f6/darwin/go1.7'
        Parity  -> 'Parity-Ethereum/v2.5.1-beta-e0141f8-20190510/x86_64-linux-gnu/rustc1.34.1'
        Ganache -> 'EthereumJS TestRPC/v2.1.5/ethereum-js'
        PyEVM   -> 'EthereumTester/0.1.0b39/linux/python3.6.7'
        """
        clients = {

            # Geth
            cls.GETH: GethClient,

            # Parity
            cls.PARITY: ParityClient,
            cls.ALT_PARITY: ParityClient,

            # Test Clients
            cls.GANACHE: GanacheClient,
            cls.ETHEREUM_TESTER: EthereumTesterClient,
        }

        try:
            client_data = w3.clientVersion.split('/')
            node_technology = client_data[0]
            ClientSubclass = clients[node_technology]

        except (ValueError, IndexError):
            raise ValueError(
                f"Invalid client version string. Got '{w3.clientVersion}'")

        except KeyError:
            raise NotImplementedError(
                f'{w3.clientVersion} is not a supported ethereum client')

        client_kwargs = {
            'node_technology': node_technology,
            'version': client_data[1],
            'backend': client_data[-1],
            'platform': client_data[2]
            if len(client_data) == 4 else None  # Platform is optional
        }

        instance = ClientSubclass._get_variant(w3)(w3, **client_kwargs)
        return instance
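
    # A quick illustration (hypothetical values) of the split that
    # from_w3() applies to the version strings quoted in its docstring:
    #
    #     client_data = 'Geth/v1.4.11-stable-fed692f6/darwin/go1.7'.split('/')
    #     # -> ['Geth', 'v1.4.11-stable-fed692f6', 'darwin', 'go1.7']
    #     node_technology = client_data[0]  # 'Geth' -> GethClient
    #     version = client_data[1]          # 'v1.4.11-stable-fed692f6'
    #     platform = client_data[2] if len(client_data) == 4 else None
    #     backend = client_data[-1]         # 'go1.7'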

    @property
    def peers(self):
        raise NotImplementedError

    @property
    def chain_name(self) -> str:
        if not self.is_local:
            return PUBLIC_CHAINS[int(self.chain_id)]
        name = LOCAL_CHAINS.get(self.chain_id, UNKNOWN_DEVELOPMENT_CHAIN_ID)
        return name

    @property
    def syncing(self) -> Union[bool, dict]:
        return self.w3.eth.syncing

    def lock_account(self, address) -> bool:
        if self.is_local:
            return True
        return NotImplemented

    def unlock_account(self, address, password, duration=None) -> bool:
        if self.is_local:
            return True
        return NotImplemented

    @property
    def is_connected(self):
        return self.w3.isConnected()

    @property
    def etherbase(self) -> str:
        return self.w3.eth.accounts[0]

    @property
    def accounts(self):
        return self.w3.eth.accounts

    def get_balance(self, account):
        return self.w3.eth.getBalance(account)

    def inject_middleware(self, middleware, **kwargs):
        self.w3.middleware_onion.inject(middleware, **kwargs)

    @property
    def chain_id(self) -> int:
        try:
            # from hex-str
            return int(self.w3.eth.chainId, 16)
        except TypeError:
            # from str
            return int(self.w3.eth.chainId)

    @property
    def net_version(self) -> int:
        return int(self.w3.net.version)

    def get_contract(self, **kwargs):
        return self.w3.eth.contract(**kwargs)

    @property
    def gas_price(self):
        return self.w3.eth.gasPrice

    @property
    def block_number(self) -> int:
        return self.w3.eth.blockNumber

    @property
    def coinbase(self) -> str:
        return self.w3.eth.coinbase

    def wait_for_receipt(self, transaction_hash: str, timeout: int) -> dict:
        receipt = self.w3.eth.waitForTransactionReceipt(
            transaction_hash=transaction_hash, timeout=timeout)
        return receipt

    def sign_transaction(self, transaction: dict):
        raise NotImplementedError

    def get_transaction(self, transaction_hash) -> str:
        return self.w3.eth.getTransaction(transaction_hash=transaction_hash)

    def send_transaction(self, transaction: dict) -> str:
        return self.w3.eth.sendTransaction(transaction=transaction)

    def send_raw_transaction(self, transaction: bytes) -> str:
        return self.w3.eth.sendRawTransaction(raw_transaction=transaction)

    def sign_message(self, account: str, message: bytes) -> str:
        """
        Calls the appropriate signing function for the specified account on the
        backend. If the backend is based on eth-tester, then it uses the
        eth-tester signing interface to do so.
        """
        return self.w3.eth.sign(account, data=message)

    def _has_latest_block(self):
        # check that our local chain data is up to date
        return (time.time() - self.w3.eth.getBlock(
            self.w3.eth.blockNumber)['timestamp']) < 30
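
    # Unrolled, the freshness check above reads (hypothetical sketch):
    #
    #     latest = self.w3.eth.getBlock(self.w3.eth.blockNumber)
    #     is_fresh = (time.time() - latest['timestamp']) < 30  # < 30 s old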

    def sync(self, timeout: int = 120, quiet: bool = False):

        # Provide compatibility with local chains
        if self.is_local:
            return

        # Record start time for timeout calculation
        now = maya.now()
        start_time = now

        def check_for_timeout(t):
            last_update = maya.now()
            duration = (last_update - start_time).total_seconds()
            if duration > t:
                raise self.SyncTimeout

        while not self._has_latest_block():
            # Check for ethereum peers
            self.log.info(
                f"Waiting for Ethereum peers ({len(self.peers)} known)")
            while not self.peers:
                time.sleep(0)
                check_for_timeout(t=self.PEERING_TIMEOUT)

            # Wait for sync start
            self.log.info(
                f"Waiting for {self.chain_name.capitalize()} chain synchronization to begin"
            )
            while not self.syncing:
                time.sleep(0)
                check_for_timeout(t=self.SYNC_TIMEOUT_DURATION * 2)

            while True:
                #  TODO:  Should this timeout eventually?
                syncdata = self.syncing
                if not syncdata:
                    return False

                self.log.info(
                    f"Syncing {syncdata['currentBlock']}/{syncdata['highestBlock']}"
                )
                time.sleep(self.SYNC_SLEEP_DURATION)
                yield syncdata

        return True
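
Because sync() contains yield, calling it returns a generator: the caller drives synchronization by iterating, and each iteration yields the node's current syncdata mapping. A usage sketch, assuming a connected Web3 instance named w3:

client = Web3Client.from_w3(w3)
for syncdata in client.sync():
    print("synced {} / {} blocks".format(
        syncdata['currentBlock'], syncdata['highestBlock']))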
Example #60
0
from collections import OrderedDict

from twisted.web._stan import Tag, slot, Comment, CDATA, CharRef
from twisted.web.iweb import ITemplateLoader
from twisted.logger import Logger

TEMPLATE_NAMESPACE = "http://twistedmatrix.com/ns/twisted.web.template/0.1"

# Go read the definition of NOT_DONE_YET. For lulz. This is totally
# equivalent. And this turns out to be necessary, because trying to import
# NOT_DONE_YET in this module causes a circular import which we cannot escape
# from. From which we cannot escape. Etc. glyph is okay with this solution for
# now, and so am I, as long as this comment stays to explain to future
# maintainers what it means. ~ C.
#
# See http://twistedmatrix.com/trac/ticket/5557 for progress on fixing this.
NOT_DONE_YET = 1
_moduleLog = Logger()


class _NSContext:
    """
    A mapping from XML namespaces onto their prefixes in the document.
    """
    def __init__(self, parent=None):
        """
        Pull out the parent's namespaces, if there's no parent then default to
        XML.
        """
        self.parent = parent
        if parent is not None:
            self.nss = OrderedDict(parent.nss)
        else: