Example #1
class TemperatureSensor(Service):
    '''
    UPnP TemperatureSensor service: exposes the current temperature as an
    evented state variable.
    '''
    version = (1, 0)
    serviceType = 'urn:schemas-upnp-org:service:TemperatureSensor:1'
    serviceId = 'urn:schemas-upnp-org:serviceId:TemperatureSensor'
    serviceUrl = 'temp'
    type = 'Temperature'
    subscription_timeout_range = (None, None)

    def __init__(self, xmlfile, client, name='Application', system=False):
        '''
        Constructor
        '''
        super(TemperatureSensor, self).__init__(
            self.type, self.serviceType, xml=xmlfile,
            client=client, appname=name)
        self.log = Logger()
        self.client = client
        if system:
            self.application = 'Outdoor'
        else:
            self.application = 'Room'
        self.client.UPNP_Temp_event = self.upnp_event
        self.currenttemperature = 2000
        self.name = name

    def upnp_event(self, evt, var):
        self.log.debug('temp event: %s  ==> %s' % (var, evt))
        setattr(self, var, evt)
class FanOperatingMode(Service):
    '''
    UPnP HVAC_FanOperatingMode service: tracks the fan's operating mode
    and on/off status.
    '''
    version = (1, 0)
    serviceType = 'urn:schemas-upnp-org:service:HVAC_FanOperatingMode:1'
    serviceId = 'urn:schemas-upnp-org:serviceId:HVAC_FanOperatingMode'
    serviceUrl = 'fanmode'
    type = 'FanOperating'
    subscription_timeout_range = (None, None)

    def __init__(self, xmlfile, client, name='Application', system=False):
        '''
        Constructor
        '''
        super(FanOperatingMode, self).__init__(
            self.type, self.serviceType, xml=xmlfile,
            client=client, appname=name)
        self.log = Logger()
        self.client = client
        self.client.UPNP_fan_event = self.upnp_event
        self.mode = 'ContinuousOn'
        self.fanstatus = 'Off'
        self.name = name

    def upnp_event(self, evt, var):
        self.log.debug('fan event: %s  ==> %s' % (var, evt))
        setattr(self, var, evt)
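Both services above share one event contract: the client calls back with (value, variable-name) pairs and upnp_event mirrors them onto the instance with setattr. A minimal sketch of that dispatch, using a hypothetical stand-in (the real classes also need the project's Service base class and an XML description, which are not shown here):

from twisted.logger import Logger

class FakeSensor(object):
    # Hypothetical stand-in carrying the same upnp_event body as above.
    def __init__(self):
        self.log = Logger()
        self.currenttemperature = 2000

    def upnp_event(self, evt, var):
        self.log.debug('temp event: %s  ==> %s' % (var, evt))
        setattr(self, var, evt)

sensor = FakeSensor()
sensor.upnp_event(2150, 'currenttemperature')
assert sensor.currenttemperature == 2150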
Example #3
class i2cProtocol(LineOnlyReceiver):

    def __init__(self):
        self.log = Logger()
        self.__funcs = {}

    def connectionMade(self):
        self.log.debug('i2c connected')

    def lineReceived(self, line):
        line = line.strip()
        called = line[:9].lstrip('0')
        onoff = bool(int(line[-1]))
        try:
            call = self.__funcs[called]
        except KeyError:
            return
        else:
            call(onoff)

    def send_on(self):
        self.transport.write(self.factory.on_msg)

    def send_off(self):
        self.transport.write(self.factory.off_msg)

    def addCallback(self, name, func):
        self.__funcs[name] = func

    def remCallback(self, name):
        try:
            del self.__funcs[name]
        except KeyError:
            return
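The line format is fixed-width: the first nine characters are a zero-padded sender id (leading zeros are stripped before lookup) and the last character is the 0/1 state. A quick exercise of lineReceived, assuming the i2cProtocol class above is importable; no transport is needed for this path:

states = []
proto = i2cProtocol()
proto.addCallback('16234266', states.append)
proto.lineReceived('0162342661')  # id '016234266' -> '16234266', state 1
proto.lineReceived('0162342660')  # same id, state 0
assert states == [True, False]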
Example #4
class HouseStatus(Service):
    '''
    UPnP HouseStatus service: tracks occupancy state, activity level and
    dormancy level for the whole house.
    '''
    version = (1, 0)
    serviceType = 'urn:schemas-upnp-org:service:HouseStatus:1'
    serviceId = 'urn:schemas-upnp-org:serviceId:HouseStatus'
    serviceUrl = 'house'
    type = 'House'
    subscription_timeout_range = (None, None)

    def __init__(self, xmlfile, client, name='Application'):
        '''
        Constructor
        '''
        super(HouseStatus, self).__init__(
            self.type, self.serviceType, xml=xmlfile,
            client=client, appname=name)
        self.log = Logger()
        self.client = client
        self.client.houses.append(self)
        self.occupancystate = 'Indeterminate'
        self.activitylevel = 'Regular'
        self.dormancylevel = 'Regular'

    def upnp_event(self, evt, var):
        self.log.debug('away event: %s  ==> %s' % (var, evt))
        setattr(self, var, evt)
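Unlike the sensors in Example #1, HouseStatus registers itself in client.houses, so the client can fan one event out to every house it knows about. A sketch of that contract with hypothetical stubs (the real constructor also runs the Service base class, omitted here):

class StubClient(object):
    def __init__(self):
        self.houses = []

class StubHouse(object):
    def __init__(self, client):
        client.houses.append(self)        # same registration as HouseStatus
        self.occupancystate = 'Indeterminate'

    def upnp_event(self, evt, var):
        setattr(self, var, evt)

client = StubClient()
house = StubHouse(client)
for h in client.houses:                   # the client's fan-out side
    h.upnp_event('Occupied', 'occupancystate')
assert house.occupancystate == 'Occupied'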
Example #5
class Volume(Service):
    version = (1, 0)
    serviceType = "urn:av-openhome-org:service:Volume:1"
    serviceId = "urn:av-openhome-org:serviceId:Volume"
    serviceUrl = "Volume"
    type = 'Volume'
    subscription_timeout_range = (None, None)

    def __init__(self, xmlfile, client, name='Application'):
        super(Volume, self).__init__(
            self.type, self.serviceType, xml=xmlfile,
            client=client, appname=name)
        self.log = Logger()
        self.client = client
        self.client.oh_eventVOLUME = self.upnp_event
        self.volumemax = self.client.max_volume
        self.volumeunity = 3
        self.volume = self.volumemax
        self.volumesteps = self.volumemax
        self.volumemillidbperstep = 600
        self.balancemax = 10
        self.balance = 0
        self.fademax = 10
        self.fade = 0
        self.mute = 0

    def upnp_event(self, evt, var):
        self.log.debug('volume event: %s  ==> %s' % (var, evt))
        setattr(self, var, evt)
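The OpenHome volume model is step based: volumemillidbperstep = 600 means each step moves the level by 600 milli-dB, i.e. 0.6 dB. A worked check under the assumption of a 100-step device (the real maximum comes from client.max_volume):

volumemax = 100                       # hypothetical client.max_volume
volumemillidbperstep = 600            # 600 milli-dB = 0.6 dB per step
full_range_db = volumemax * volumemillidbperstep / 1000.0
assert full_range_db == 60.0          # a 100-step device spans 60 dB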
Example #6
class Demo_light_factory(ReconnectingClientFactory, Client):

    def __init__(self, long_address=b'\x00\x00\x00\x00\x00\x00\xFF\xFF',
                 address=b'\xFF\xFE', pin=0,
                 api_level=1, net_type=None, stateless=True):
        self.long_address = long_address
        self.address = address
        self._pin = pin
        self.pin = 'dio-' + str(pin)
        self.status = False
        self.proto = None
        self.log = Logger()
        self.callback = self.receive
        self.stateless = stateless

    '''
    Remote functions
    '''

    def r_set_target(self, value):

        if value != self.status:
            if value is True:
                self.proto.remote_at(dest_addr_long=self.long_address,
                                     command=b'D%d' % self._pin,
                                     parameter=b'\x05')
            else:
                self.proto.remote_at(dest_addr_long=self.long_address,
                                     command=b'D%d' % self._pin,
                                     parameter=b'\x04')

            if self.stateless:
                self.status = value
                self.event(value, 'status')

    def r_get_target(self):
        return self.status

    def r_get_status(self):
        return self.status

    def set_status(self, status):
        if status != self.status:
            self.log.debug('%r --> %s' % (self.long_address,
                                          'jour!' if status else 'nuit!'))
            self.status = status
            self.event(status, 'status')

    def receive(self, data):
        if 'samples' in data:
            for sample in data['samples']:
                if self.pin in sample:
                    self.set_status(sample[self.pin])
        elif 'parameter' in data:
            for sample in data['parameter']:
                if self.pin in sample:
                    self.set_status(sample[self.pin])
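The remote_at parameters follow the XBee digital-I/O convention: writing 0x05 to pin command Dn configures the pin as a digital output driven high, and 0x04 drives it low. A small sketch of the frames built for pin 0:

pin = 0
on_frame = {'command': b'D%d' % pin, 'parameter': b'\x05'}   # output high
off_frame = {'command': b'D%d' % pin, 'parameter': b'\x04'}  # output low
assert on_frame['command'] == b'D0'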
Example #7
class Fake_HE_endpoint(object):
    bus = None
    clients = {}

    def __init__(self, reactor, bus_addr, addr, speed):
        self.random = False
        self.log = Logger()
        self.reactor = reactor
        self.bus_addr = bus_addr
        self.pair = addr
        self.speed = speed
        self.running = False

    def connect(self, clientFactory):
        proto = clientFactory.proto
        proto.transport = self
        if clientFactory.addr not in self.clients:
            self.clients.update({clientFactory.addr: proto})
        if not self.bus:
            r = task.LoopingCall(self.check)
            r.start(20)
        clientFactory.doStart()
        return defer.succeed(None)

    def check(self):
        if not self.running:
            for client in self.clients.values():
                client.connectionMade()
            self.running = True
            self.bus = True
        self.random = not self.random
        line_a = '162342660' + str(int(self.random))
        line_b = '334455660' + str(int(not self.random))
        if line_a[:-1] in self.clients:
            self.clients[line_a[:-1]].lineReceived(line_a)
        if line_b[:-1] in self.clients:
            self.clients[line_b[:-1]].lineReceived(line_b)

    def write(self, msg):
        t = []
        if len(msg) < 11:
            for n in msg:
                t.append(ord(n))
        else:
            raise Exception('too much data')
        self.log.debug('send %s to i2c link' % t)
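write() mimics the 10-byte limit of the real I2C link: messages of up to ten characters are logged as lists of ordinals, anything longer is rejected. Assuming Fake_HE_endpoint is importable, with dummy constructor arguments:

ep = Fake_HE_endpoint(None, 0x20, '16234266', 100000)  # dummy args
ep.write('1623426601')                 # 10 chars: logged as ordinals
try:
    ep.write('16234266011')            # 11 chars: refused
except Exception as err:
    assert str(err) == 'too much data'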
Example #8
class serialLineProtocol(LineOnlyReceiver):

    def __init__(self):
        self.log = Logger()
        self.__callbacks = {}

    def connectionMade(self):
        self.log.debug('serial connected')

    def lineReceived(self, line):
        for name in self.__callbacks:
            self.__callbacks[name](line)

    def send(self, data):
        self.transport.write(data)

    def addCallback(self, name, func):
        self.__callbacks.update({name: func})

    def remCallback(self, name):
        if name in self.__callbacks:
            del self.__callbacks[name]
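A loopback of serialLineProtocol using the StringTransport test helper that ships with Twisted; received lines are fanned out to every registered callback, stripped of the delimiter:

from twisted.test.proto_helpers import StringTransport

received = []
proto = serialLineProtocol()
proto.makeConnection(StringTransport())   # triggers connectionMade
proto.addCallback('collector', received.append)
proto.dataReceived(b'OK\r\nERROR\r\n')    # two delimited lines
assert received == [b'OK', b'ERROR']
proto.send(b'AT\r')                       # goes out via the transport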
Example #9
class Controller(service.MultiService):
    targets = {}
    services = []
    binary_light_list = []
    hvac_list = []
    media_player_list = []
    messager = None
    server_list = []
    shutter_list = []
    camera_list = []
    multiscreen_list = []
    dimmable_light_list = []
    ambi_light_list = []
    event_catcher = None
    cloud_event_catcher = None
    subscriptions = {}
    subscriptions_cloud = {}
    searchables = {}
    ready_to_close = False
    current_device = None
    cloud = False
    lan = False
    agent = None

    def __init__(
            self, parent=None, searchables=None, xmldir=None,
            network='lan', cloud_user=None, cloud_servers=[],
            logger=None, uid=None, messager=None):
        self.connected = False
        self.messager = messager
        self.app_paused = False
        self.fail_count = 0
        if not logger:
            self.log = Logger()
        else:
            self.log = logger
        self.log.debug('UPnP controller starts')
        self.xmldir = xmldir
        self.devices = {}
        self._services = {}
        self.parent = parent
#         self.amp = ControllerAmp(self)
        if uid:
            self.uuid = uid
        else:
            self.uuid = str(
                uuid.uuid5(
                    uuid.NAMESPACE_DNS,
                    socket.gethostname() + 'onDemand_Controller'))
        if searchables:
            for typ in searchables:
                self.searchables.update({typ[0]: typ[1]})
#                 print(self.searchables)
        else:
            self.searchables = {'upnp:rootdevice': self.log.debug}
        if network in ('lan', 'both'):
            self.log.debug('UPnP classic enabled')
            self.lan = True
            self.listener = ssdp.SSDP_Listener(self)
            self.mcast = internet.MulticastServer(  # @UndefinedVariable
                SSDP_PORT,
                self.listener,
                listenMultiple=True,
                interface=SSDP_ADDR_V4)
            self.mcast.setServiceParent(self)
            self.ssdp_cli = ssdp.SSDP_Client(
                self, get_default_v4_address(), device=False)
            self.ucast = internet.UDPServer(  # @UndefinedVariable
                0, self.ssdp_cli, self.ssdp_cli.interface)
            self.ucast.setServiceParent(self)
#             self.agent = Agent(reactor)
        if network in ('cloud', 'both'):
            if cloud_user:
                self.log.debug('UPnP Cloud enabled')
                self.cloud = True
                self._jid, secret = cloud_user
                self.users = {self._jid: {'state': True}}
                for user in cloud_servers:
                    self.users.update({user: {'state': False}})
                self.hosts = {}
                self.resourcepart = ''.join((
                    'urn:schemas-upnp-org:cloud-1-0:ControlPoint:1:uuid:',
                    self.uuid))
                full_jid = ''.join(
                    (self._jid, '/', self.resourcepart))
                self.jid = jid = JID(full_jid)
                self.reactor = reactor
                f = client.XMPPClientFactory(jid, secret)
                f.addBootstrap(
                    xmlstream.STREAM_CONNECTED_EVENT, self.cloud_connected)
                f.addBootstrap(
                    xmlstream.STREAM_END_EVENT, self.cloud_disconnected)
                f.addBootstrap(
                    xmlstream.STREAM_AUTHD_EVENT, self.authenticated)
                f.addBootstrap(
                    xmlstream.INIT_FAILED_EVENT, self.cloud_failed)
                self.connector = endpoints.HostnameEndpoint(
                    reactor, jid.host, 5222)
                self.factory = f
#             factory = Factory()
#             factory.protocol = ControllerAmp(self)
#             amp_service = internet.TCPServer(  # @UndefinedVariable
#                 4343, factory)
#             amp_service.setServiceParent(self)
#                 self.connector = SRVConnector(
#                     reactor, 'xmpp-client', jid.host, f, defaultPort=5222)
#         log.startLogging(sys.stdout)

    def startService(self):
        '''
        '''
        service.MultiService.startService(self)
        if self.cloud:
            self.connector.connect(self.factory)
            self.log.debug('Cloud Service started')
        if self.lan:
            t = task.LoopingCall(self.search_devices)
            t.start(15)
            self.log.debug('SSDP Service started')

    def resume(self):
        self.app_paused = False
        if not self.connected:
            if self.cloud:
                self.connector.connect(self.factory)
                self.log.debug('Cloud Service started')
            if self.lan:
                t = task.LoopingCall(self.search_devices)
                t.start(15)
                self.log.debug('SSDP Service started')

    def stopService(self):
        self.log.debug('Stopping controller service...')
        self.clean()
#         d.addCallback(lambda ignored: service.MultiService.stopService(self))
        service.MultiService.stopService(self)
#         reactor.callLater(10, reactor.stop)  # @UndefinedVariable

    def cloud_disconnected(self, reason):
        if not reason:
            reason = 'Unknown'
        self.log.warn('Cloud Server disconnected: %s' % reason)
        self.connected = False
        if not self.app_paused and self.fail_count < 10:
            self.fail_count += 1
            self.resume()

    def cloud_failed(self, failure):
        self.log.error('Cloud Login failed: %s' % str(failure))

#         self.xmlstream.sendFooter()

    def clean(self):
        return reactor.callInThread(  # @UndefinedVariable
            threads.blockingCallFromThread, *(reactor, self.cleanfunc))

    def cleanfunc(self):

        def cleaned(res):
            self.log.debug('cleaned')
            if self.cloud:
                self.xmlstream.sendFooter()
        dl = []
        if self.lan:
            for name in self.subscriptions.keys():
                dl.append(self.unsubscribe(name))
        if self.cloud:
            for name in self.subscriptions_cloud.keys():
                dl.append(self.unsubscribe(name))
        d = defer.DeferredList(dl)
        d.addCallback(cleaned)
        return d

    def cloud_connected(self, xs):
        self.log.debug('Cloud Connected')
        self.fail_count = 0
        self.connected = True
        self._services = {}
        self.subscriptions = {}
        self.xmlstream = xs
#         xs.rawDataInFn = self.rawDataIn

    def authenticated(self, xs):

        self.log.debug('Cloud Authenticated')
        presence = domish.Element((None, 'presence'))
        xs.send(presence)
        xs.addObserver('/presence', self.on_presence)
        xs.addObserver('/iq', self.on_iq)
        xs.addObserver('/message', self.on_event)
        disco = IQ(xs, 'get')
        disco.addElement(('http://jabber.org/protocol/disco#items', 'query'))
        disco.addCallback(self.cloud_discovered)
        disco.send()
#         self.reactor.callLater(120, xs.sendFooter)
        self.reactor.callLater(5, self.check_users)

    def check_users(self):
        for user, value in self.users.items():
            if value['state'] is False:
                iq = IQ(self.xmlstream, 'set')
                query = domish.Element(('jabber:iq:roster', 'query'))
                item = domish.Element((None, 'item'))
                item['name'] = user
                item['jid'] = user
                item.addElement('group', content='hosts')
                query.addChild(item)
                iq.addChild(query)
                iq.addCallback(self.cloud_subscribe, user)
#                 print('send IQ: %s' % (iq.toXml().encode('utf-8')))
                iq.send()

    def cloud_subscribe(self, jid, result):
        self.log.debug('Subscribe callback from %s' % jid)
        presence = domish.Element((None, 'presence'))
        presence['type'] = 'subscribe'
        presence['to'] = jid
        self.xmlstream.send(presence)

    def on_event(self, message):
        if not self.cloud_event_catcher:
            reactor.callLater(1, self.on_event, message)  # @UndefinedVariable
            return
        if message.name == 'iq':
            if message['type'] == 'result':
                try:
                    last = None
                    for child in message.children[0].children[0].children:
                        last = child.children[0]
                except (KeyError, IndexError):
                    return
                if last is None:
                    return
#                 print(message.toXml())
#                 print(last.toXml())
                self.cloud_event_catcher.receive(last.toXml().encode('utf-8'))
        elif message.children[0].name == 'event':
            evt = message.children[0]
            items = evt.children[0]
            node_name = str(items['node'])
            if node_name in self.subscriptions_cloud:
                for item in items.children:
                    propertyset = item.children[0]
                    self.cloud_event_catcher.receive(
                        (node_name, propertyset.toXml().encode('utf-8'),))

    def rawDataIn(self, buf):
        print(
            "Device RECV: %s"
            % unicode(buf, 'utf-8').encode('ascii', 'replace'))

    def on_presence(self, resp):
        self.log.debug('got presence: %s' % resp.toXml().encode('utf-8'))
#         print('from :%s' % resp['from'])
        user, host, res = parse(resp['from'])
        jid = '@'.join((user, host))
        if resp.hasAttribute('type'):
            if resp['type'] == 'subscribed':
                if jid in self.users:
                    self.users[jid].update({'state': True})
                    if 'services' in self.users[jid]:
                        self.users[jid]['services'].append(res)
                    else:
                        self.users[jid].update({'services': [res]})
                    presence = domish.Element((None, 'presence'))
                    presence['type'] = 'subscribe'
                    presence['to'] = resp['from']
                    self.xmlstream.send(presence)
                else:
                    presence = domish.Element((None, 'presence'))
                    presence['type'] = 'unsubscribed'
                    presence['to'] = resp['from']
                    self.xmlstream.send(presence)
            elif resp['type'] == 'unsubscribed':
                if jid in self.users:
                    self.log.warn('subscription failed: %s' % resp['from'])
                return
        for child in resp.elements():
            if child.name == 'ConfigIdCloud':
                self.log.debug('Found UPnP Cloud device : %s type is: %s' % (
                    jid,
                    res))
                info = IQ(self.xmlstream, 'get')
#                 info['to'] = resp['from']
                query = domish.Element(
                    ('urn:schemas-upnp-org:cloud-1-0', 'query'))
                query['type'] = 'description'
                query['name'] = ':'.join(res.split(':')[-2:])
                info.addChild(query)
                info.addCallback(self.on_description, res)
#                 info.send()
                info.send(to=resp['from'])

    def on_description(self, resource, iq):
        location = iq['from']
        clbk = self.searchables[
            self.searchables.keys()[0]]
        if iq['type'] == 'result':
            if iq.children[0].name == 'query'\
                    and iq.children[0]['type'] == 'described':
                self.update_devices(
                    resource,
                    location,
                    clbk,
                    xml=iq.children[0].children[0].toXml())

    def cloud_discovered(self, iq):
        self.log.debug('Discovered item: %s' % iq.toXml().encode('utf-8'))
        if iq['type'] == 'result':
            for child in iq.children:
                if child.name == 'query':
                    for grandchild in child.children:
                        if grandchild['jid'].encode('utf-8') ==\
                                self.jid.full():
                            continue
                        if grandchild['name'].encode('utf-8')\
                                in self.hosts:
                            self.hosts[
                                grandchild['name'].encode('utf-8')].append(
                                    grandchild['jid'].encode('utf-8'))
                        else:
                            self.hosts.update(
                                {grandchild['name'].encode('utf-8'):
                                    [grandchild['jid'].encode('utf-8')]})
#         print(self.hosts)

    def on_iq(self, iq):
        pass
#         print('got iq: %s' % iq.toXml())
#         try:
#             print('from :%s' % iq['from'])
#         except KeyError:
#             print('From I don\'t know: %s' % iq.toXml())
#         print('type: %s' % iq['type'])

    def search_devices(self):
        for search in self.searchables:
            self.ssdp_cli.send_MSEARCH(search, uuid=self.uuid)

    def update_hosts(self, host, unicast=False):

        if 'location' in host:
            if 'usn' in host:
                if host['usn'] in self.devices:
                    return
                device = host['usn'].split('::')
                if len(device) > 1:
                    uid = device[0].split(':')[1]
                    if uid in self.devices:
                        return
                    typ = device[1]
                    if typ in self.searchables:
                        self.update_devices(
                            uid, host['location'], self.searchables[typ])
#                         self.devices.append(uid)

    def send_message(self, message_type, name, id_, value):
        if self.messager:
            if isinstance(value, dict):
                self.messager.callRemote(message_type,
                                         name=name,
                                         id_=id_,
                                         value=json.dumps(value))

#                 for v in value.iteritems():
#                     if not v or isinstance(v, dict):
#                         print('zap')
#                         continue
#                     print(v)
#                     self.messager.callRemote(message_type,
#                                              name=name,
#                                              id_=id_,
#                                              value=':'.join((k, v)))
            else:
                self.messager.callRemote(message_type,
                                         name=name,
                                         id_=id_,
                                         value=value)

    def update_devices(self, uid, location, callback_fct, xml=None):
        def device_parsed(dic):
            self.devices.update(dic)
            if callable(callback_fct):
                callback_fct(dic)
            else:
                self.send_message(Event, callback_fct, uid, dic[uid])
                if self.messager:
                    self.messager.parent.notify(
                        'New Device detected:', dic[uid]['name'])
        uid = bytes(uid)
        self.log.debug('new device %s: %s' % (uid, location))
        if '@' in location:
            # Cloud devices arrive with their description XML in hand.
            if xml:
                device_parsed(self.parse_host(xml, location, uid))
            return
        if not self.agent:
            self.agent = Agent(reactor)
        d = self.agent.request('GET', location)
        d.addCallback(readBody)
        d.addCallback(self.parse_host, *(location, uid))
        d.addCallback(device_parsed)

    def parse_host(self, xml, location, uid):
        typ = 'upnp'
        loc = None
        if '@' in location:
            url_prefix = ''.join(('xmpp://', location))
            net = 'cloud'
        else:
            url_prefix = urlparse(location).netloc
            net = 'lan'
        try:
            root = et.fromstring(xml)
        except Exception:
            self.log.error('bad xml: %s' % xml)
            return {}
        host = {}
        icon = None
        for children in root:
            if children.tag.split('}')[-1] == 'device':
                for att in children:
                    if att.tag.split('}')[-1] == 'friendlyName':
                        fname = att.text
                    if att.tag.split('}')[-1] == 'deviceType':
                        devtype = att.text
                        if 'Source' in att.text:
                            typ = 'oh'
                    if att.tag.split('}')[-1] == 'iconList':
                        for ico in att:
                            #  log.debug(ico)
                            for info in ico:
                                if info.tag.split('}')[-1] == 'width':
                                    if int(info.text) <= 96:
                                        if ico[4].text.startswith('/'):
                                            icon = 'http://'\
                                                + url_prefix\
                                                + ico[4].text
                                        else:
                                            icon = ico[4].text
                    if att.tag.split('}')[-1] == 'serviceList':
                        svc = {}
                        for serv in att:
                            d = {}
                            for info in serv:
                                if 'URL' in info.tag.split('}')[-1]:
                                    if net == 'lan':
                                        d.update({info.tag.split('}')[-1]:
                                                  'http://' +
                                                  url_prefix + info.text})
                                    else:
                                        d.update(
                                            {info.tag.split('}')[-1]:
                                             url_prefix + info.text})
                                else:
                                    d.update(
                                        {info.tag.split('}')[-1]: info.text})
                            svc.update({d['serviceType']: d})
                    if att.tag.split('}')[-1] == 'X_location':
                        loc = att.text
        host.update(
            {uid: {
                'name': fname,
                'devtype': devtype,
                'icon': icon,
                'services': svc,
                'type': typ,
                'network': net,
                'location': location,
                'loc': loc}})
#         log.debug(host)
        return host

    def subscribe(self, *args, **kwargs):
        if args[0][args[0].keys()[0]]['network'] == 'lan':
            return self.subscribe_classic(*args, **kwargs)
        else:
            return self.subscribe_cloud(*args, **kwargs)

    def subscribe_classic(
            self, device, svc, var, callback_fct=None,
            callback_args=()):
        if not callback_fct:
            callback_fct = self.log.debug
        name = device.keys()[0]
        dev = device[name]

        def subscribe_failed(err, name):
            self.parent.remove_device(name.split('_')[0])

        def subscribed(req, raddr, host, name):
            try:
                uuid = req.headers.getRawHeaders('sid')[0]
                print('subscription uuid = %s' % uuid)
                if name in self.subscriptions:
                    if host in self.subscriptions[name]:
                        self.subscriptions[name][host].update({uuid: raddr})
                    else:
                        self.subscriptions[name].update({host: {uuid: raddr}})
                else:
                    self.subscriptions.update({name: {host: {uuid: raddr}}})
                reactor.callLater(  # @UndefinedVariable
                    20, self.renew_subscription, uuid)
                return name
            except TypeError:
                return subscribe_failed(None, name)

        if self.event_catcher is None:
            self.event_catcher = EventServer()
            self.event_catcher.setServiceParent(self)
        subscription_id = '_'.join((name, svc.split(':')[-2]))
        childpath = '_'.join((subscription_id, 'event',))
#         log.error(childpath)
        if childpath in self.event_catcher.catcher.childs:
            self.event_catcher.catcher.childs[childpath].update(
                {var: (callback_fct, callback_args,)})
        else:
            self.event_catcher.catcher.childs.update(
                {childpath: {var: (callback_fct, callback_args,)}})
#         log.error(self.event_catcher.catcher.childs)
        if subscription_id in self.subscriptions:
            for k, value in self.event_catcher.catcher.unfiltered.items():
                if k == var:
                    if value == 'False':
                        value = False
                    elif value == 'True':
                        value = True
                    if isinstance(callback_args, str)\
                            or isinstance(callback_args, bool):
                        callback_fct(value, callback_args)
                    else:
                        callback_fct(value, *callback_args)
                    del self.event_catcher.catcher.unfiltered[k]
            return defer.succeed(None)
        else:
            self.subscriptions.update({subscription_id: {}})
        clbk = '<' + 'http://' + get_default_v4_address() + ':' +\
            str(self.event_catcher.getPort()) + '/' + childpath + '>'
#             print(clbk)
        headers = {'HOST': [get_default_v4_address() + ':' +
                            str(self.event_catcher.getPort())],
                   'CALLBACK': [clbk],
                   'NT': ['upnp:event'],
                   'TIMEOUT': ['Second-25']}
        if svc in dev['services']:
            self.log.error(svc)
            addr = dev['services'][svc]['eventSubURL']
            self.log.error(addr)
            d = self.agent.request(
                'SUBSCRIBE',
                addr,
                Headers(headers))
            d.addCallbacks(
                subscribed,
                subscribe_failed,
                callbackArgs=(addr, headers['HOST'][0], subscription_id),
                errbackArgs=(subscription_id,))
            return d
#         log.error(dev['services'])
        return defer.fail(Exception('Service unknown'))

    def renew_subscription(self, sid):

        def renewed(res):
            #             print('subscription %s successfully renewed' % sid)
            reactor.callLater(  # @UndefinedVariable
                20, self.renew_subscription, sid)

        def failed(res):
            for name in self.subscriptions:
                for host in self.subscriptions[name]:
                    if sid in self.subscriptions[name][host]:
                        del self.subscriptions[name][host][sid]
                        self.parent.remove_device(name.split('_')[0])
        for name in self.subscriptions:
            for host in self.subscriptions[name]:
                if sid in self.subscriptions[name][host]:
                    headers = {'HOST': [host], 'SID': [sid],
                               'TIMEOUT': ['Second-25']}
                    d = self.agent.request(
                        'SUBSCRIBE',
                        self.subscriptions[name][host][sid],
                        Headers(headers))
                    d.addCallbacks(renewed, failed)
                    return d

    def unsubscribe(self, name):
        print('unsubscribe: %s' % name)
        deferreds = []
        if name in self.subscriptions:
            for host in self.subscriptions[name]:
                for sid in self.subscriptions[name][host]:
                    deferreds.append(self.unsubscribe_host(
                        sid,
                        host,
                        self.subscriptions[name][host][sid], name))
        if name in self.subscriptions_cloud:
            return self.unsubscribe_cloud(name)
        if len(deferreds) > 0:
            #             print(deferreds)
            d = defer.DeferredList(deferreds)
        else:
            d = defer.succeed('nothing to do')
        return d

    def unsubscribe_cloud(self, name):

        def unsubscribed(name, d, res):
            if res['type'] == 'result':
                #                 print('unsubscribed: %s' % name)
                del self.subscriptions_cloud[name]
                print('ok')
                d.callback(None)
            else:
                d.errback(Exception(res.toXml()))

        d = defer.Deferred()
        iq = IQ(self.xmlstream, 'set')
        ps = domish.Element(('http://jabber.org/protocol/pubsub', 'pubsub'))
        unsubscribe = domish.Element((None, 'unsubscribe'))
        unsubscribe['node'] = name
        unsubscribe['jid'] = self.jid.full()
        ps.addChild(unsubscribe)
        iq.addChild(ps)
        iq.addCallback(unsubscribed, name, d)
        iq.send(to='pubsub.' + self.jid.host)
        return d

    def unsubscribe_host(self, sid, host, addr, name=None):
        #  log.debug(
        #     'unsubscribe uuid host addr: %s %s %s' % (sid, host, addr))

        def unsubscribed(res):
            #             print('subscription %s successfully cancelled' % sid)
            if name:
                if len(self.subscriptions[name][host]) == 1:
                    del self.subscriptions[name]
                else:
                    del self.subscriptions[name][host][sid]
            return res

        headers = {'HOST': [host], 'SID': [sid]}
        d = self.agent.request(
            'UNSUBSCRIBE',
            addr,
            Headers(headers))
        d.addCallback(unsubscribed)
        return d

    def subscribe_cloud(
            self, device, svc, var, callback_fct=None, callback_args=()):
        #         print('suscribe to %s' % var)
        name = device.keys()[0]
        dev = device[name]
        if not callback_fct:
            callback_fct = self.log.debug
        d = defer.Deferred()

        def subscribe_failed(err, name):
            self.parent.remove_device(name.split('_')[0])

        def subscribed(node_name, deferred, iq):
            if iq['type'] == 'result':
                self.subscriptions_cloud[str(node_name)] = True
#                 print('%s suscribed !' % str(node_name))
#                 iq = IQ(self.xmlstream, 'get')
#                 ps = domish.Element(
#                     ('http://jabber.org/protocol/pubsub', 'pubsub'))
#                 items = domish.Element((None, 'items'))
#                 items['node'] = node_name
#                 items['max_items'] = '1'
#                 ps.addChild(items)
#                 iq.addChild(ps)
#                 iq.addCallback(self.on_event)
#                 iq.send(to='pubsub.' + self.jid.host)
#                 print(iq.toXml())
                deferred.callback(str(node_name))
            else:
                deferred.errback(Exception('subscription to %s failed: %s'
                                           % (node_name, iq.toXml())))

        if svc in dev['services']:
            #             print('service %s ok' % svc)
            #             print('subscriptions :%s' % self.subscriptions_cloud)
            if not self.cloud_event_catcher:
                self.cloud_event_catcher = CloudEventCatcher(
                    {}, {}, logger=self.log)
            subscription_name = '/'.join((dev['location'], svc, var))
            #  subscription_service = svc
            if subscription_name in self.cloud_event_catcher.callbacks:
                self.cloud_event_catcher.callbacks[subscription_name].update(
                    {var: (callback_fct, callback_args,)})
            else:
                self.cloud_event_catcher.callbacks.update(
                    {subscription_name: {var: (callback_fct, callback_args,)}})
#             if var in self.cloud_event_catcher.callbacks:
#                 self.cloud_event_catcher.callbacks[var].update(
#                     {var: (callback_fct, callback_args,)})
#             else:
#                 self.cloud_event_catcher.callbacks.update(
#                     {var: {var: (callback_fct, callback_args,)}})
    #         log.error(self.event_catcher.catcher.childs)
            if subscription_name in self.subscriptions_cloud:
                if self.subscriptions_cloud[subscription_name]:
                    #                     print('already subscribed: %s' % subscription_name)
                    for k, value in\
                            self.cloud_event_catcher.unfiltered_dict.items():
                        #                         print('is %s == %s ?' % (k, var))
                        if k == var:
                            if value == 'False':
                                value = False
                            elif value == 'True':
                                value = True
                            if isinstance(callback_args, str)\
                                    or isinstance(callback_args, bool):
                                callback_fct(value, callback_args)
                            else:
                                callback_fct(value, *callback_args)
                            del self.cloud_event_catcher.unfiltered_dict[k]
                    return defer.succeed(None)
            self.subscriptions_cloud.update({str(subscription_name): False})
#             print(subscription_name)
#             print(subscription_service)
            iq = IQ(self.xmlstream, 'set')
            ps = domish.Element(
                ('http://jabber.org/protocol/pubsub', 'pubsub'))
            subscribe = domish.Element((None, 'subscribe'))
            subscribe['node'] = subscription_name
            subscribe['jid'] = self.jid.full()
            ps.addChild(subscribe)
            iq.addChild(ps)
            iq.addCallback(subscribed, subscription_name, d)
            iq.send(to='pubsub.' + self.jid.host)
            return d
        return defer.fail(Exception('Service unknown'))

    def get_client(self, device, service):
        if self.xmldir is not None:
            client = None
        else:
            import importlib
            module_name = service.split(':')[-2]
            app = getattr(importlib.import_module(
                'upnpy_spyne.services.templates.' + module_name.lower()),
                module_name)
            if device['network'] == 'lan':
                client = Client(
                    device['services'][service]['controlURL'],
                    Application([app], app.tns,
                                in_protocol=Soap11(), out_protocol=Soap11()))
                client.set_options(
                    out_header={'Content-Type': ['text/xml;charset="utf-8"'],
                                'Soapaction': [app.tns]})
            else:
                url = (self.xmlstream, device['location'],)
                client = Client(
                    url,
                    Application([app], app.tns,
                                in_protocol=Soap11(xml_declaration=False),
                                out_protocol=Soap11(xml_declaration=False)),
                    cloud=True)
#                 print('**********%s' % service)
#                 print(device['services'][service])
        return client

    def call(self, device, service, func, params=()):
        if isinstance(device, dict):
            devname = device.keys()[0]
            dev = device[devname]
        else:
            devname = device
            dev = self.devices[device]
        if devname not in self._services:
            client = self.get_client(dev, service)
            self._services.update({devname: {service: client.service}})
        elif service not in self._services[devname]:
            client = self.get_client(dev, service)
            self._services[devname].update({service: client.service})
        try:
            f = getattr(
                self._services[devname][service], func)
        except AttributeError:
            self.log.error(
                'function %s not found for service %s' % (func, service))
            return defer.fail(Exception(
                'function %s not found for service %s' % (func, service)))
        try:
            if len(params) > 0:
                if isinstance(params, str):
                    d = f(params)
                else:
                    d = f(*params)
            else:
                d = f()
        except TypeError:
            #  boolean has no len
            d = f(params)
        d.addErrback(
            lambda failure, fname: self.log.error(
                '%s call failed : %s' % (fname, failure.getErrorMessage())),
            func)
        return d
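Typical startup, sketched under assumptions: searchables pairs an SSDP search target with a callback for matching devices, and the controller runs as an ordinary Twisted MultiService (the device type below is illustrative):

from twisted.internet import reactor

def found(device):
    print('discovered: %s' % device)

controller = Controller(
    searchables=[('urn:schemas-upnp-org:device:MediaRenderer:1', found)],
    network='lan')
controller.startService()    # begins the 15 s M-SEARCH loop
reactor.run()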
Example #10
class EventSubscription(object):
    __slots__ = ['sid',
                 'callback',
                 'timeout',
                 'last_subscribe',
                 'next_notify_key',
                 'expired',
                 '__dict__']

    def __init__(self, sid, callback, timeout):
        self.log = Logger()
        self.sid = sid
        self.callback_addr = callback
        self.timeout = timeout
        self.last_subscribe = time.time()
        self.next_notify_key = 0
        self.expired = False  # subscription has been flagged for deletion
        self.agent = Agent(reactor)
        self.pending_events = {}
        self.pending = False

    def _increment_notify_key(self):
        if self.next_notify_key >= 4294967295:
            self.next_notify_key = 0
        else:
            self.next_notify_key += 1

    def check_expiration(self):
        if self.expired is True:
            return True

        if time.time() > self.last_subscribe + self.timeout:
            self.expired = True
            return True

        return False

    def send_notify(self):

        self.pending = False
        if len(self.pending_events) == 0:
            return
        PREFIX = "{urn:schemas-upnp-org:event-1-0}"
        _propertyset = et.Element(
            'propertyset',
            nsmap={'e': 'urn:schemas-upnp-org:event-1-0'})
#         _propertyset = et.Element(
#             'e:propertyset',
#             attrib={'xmlns:e': 'urn:schemas-upnp-org:event-1-0'})
        for prop in self.pending_events.values():
            if prop.namespace is not None:
                et.register_namespace('e', prop.namespace)
            _property = et.SubElement(_propertyset, PREFIX + 'property')
#             log.msg('Child xml = %s' % prop.value)
#             _property.append(make_element(prop.name, str(prop.value)))
            try:
                evt = et.Element(prop.name)
                if prop.name == 'LastChange':
                    if prop.namespace is None:
                        ev = et.Element('Event')
                    else:
                        ev = et.Element('Event',
                                        attrib={'xmlns': prop.namespace})
                    inst = et.Element('InstanceID', attrib={'val': "0"})
                    prefix = ''
                    for n in prop.value:
                        if 'namespace' in prop.value[n]:
                            prefix = '%s:' % n[0]
                            et.register_namespace(prefix,
                                                  prop.value[n]['namespace'])
                        if 'attrib' in prop.value[n]:
                            attr = prop.value[n]['attrib']
                        else:
                            attr = {}
                        attr.update(
                            {'val': str(prop.value[n]['value'])
                             .decode('utf-8')})
                        var = et.Element(prefix + n, attrib=attr)
#                         var.text = str(prop.value[n]['value'])
                        inst.append(var)
                    ev.append(inst)
#                     evt.append(ev)
                    evt.text = et.tostring(ev)
                else:
                    #  log.err('%s - %s' % (prop.name, prop.value))
                    evt.text = str(prop.value).decode('utf-8')
                _property.append(evt)
            except Exception:
                self.log.debug(
                    'Malformed XML Event: %s' % dir(prop))
                return
            _propertyset.append(_property)
        headers = {
            'NT': ['upnp:event'],
            'NTS': ['upnp:propchange'],
            'SID': [self.sid],
            'SEQ': [str(self.next_notify_key)],
            'Content-Type': ['text/xml']
        }
        data = StringIO(''.join(('<?xml version="1.0" ',
                                 'encoding="utf-8" ',
                                 'standalone="yes"?>',
                                 et.tostring(_propertyset))))
#         log.err("Event TCP Frame Data: %s" % data)
        body = FileBodyProducer(data)

        def notify_failed(err):
            self.log.debug(
                'Notify failed: %s --- %s'
                % (err.type, err.getErrorMessage()))
            self.expired = True
#         log.err(self.callback_addr)
        d = self.agent.request(
            'NOTIFY',
            self.callback_addr,
            Headers(headers),
            body)
        d.addCallbacks(lambda ignored: data.close(), notify_failed)
#         d.addErrback(notify_failed)
        self._increment_notify_key()
        self.pending_events = {}
        return d

    def notify(self, prop):
        """

        :type props: EventProperty or list of EventProperty
        """
        #         log.msg('notify')
        if self.expired:
            return
        if self.check_expiration():
            self.log.debug("(%s) subscription expired" % self.sid)
            return
        if isinstance(self.callback_addr, str):
            if prop.name == 'LastChange':
                if prop.name in self.pending_events:
                    self.pending_events[prop.name].value.update(prop.value)
                else:
                    self.pending_events.update({prop.name: prop})
            else:
                self.pending_events.update({prop.name: prop})
            if not self.pending:
                self.pending = True
                reactor.callLater(0.5,  # @UndefinedVariable
                                  self.send_notify)
        else:
            self.callback_addr.publish((prop.name, prop,))
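Notifications are coalesced for half a second, wrapped in a GENA e:propertyset envelope and sent as an HTTP NOTIFY with SID and SEQ headers; per the UPnP eventing rules the sequence counter wraps to 0 after 2^32 - 1. A check of the wrap logic, mirrored from _increment_notify_key:

def next_notify_key(key):
    # mirrors EventSubscription._increment_notify_key
    return 0 if key >= 4294967295 else key + 1

assert next_notify_key(7) == 8
assert next_notify_key(4294967295) == 0   # SEQ wraps at 2**32 - 1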
Example #11
class MpdProtocol(LineReceiver):
    """
    Twisted protocol to control remote mpd server
    """

    def __init__(self):
        """
        doc
        """
        self.log = Logger()
        self.delimiter = "\n"
        self.deferreds = []
        self.buff = {}
        self.idle = False
        self.list_index = 0

    def connectionLost(self, reason):
        self.log.error("connection lost : {reason}", reason=reason)
        self._event({"changed": "disconnected"})
        self.idle = False
        try:
            d = self.deferreds.pop(0)
        except IndexError:
            pass
        else:
            d.errback(reason)

    def connectionMade(self):
        self.log.debug("connected")

    def addCallback(self, d):
        self.deferreds.append(d)

    def noidle(self):
        d = defer.Deferred()
        d.addCallback(lambda ignored: ignored)
        self.deferreds.insert(0, d)
        self.sendLine("noidle")
        self.idle = False

    #         print('noidle')

    def set_idle(self):
        self.sendLine("idle")
        self.idle = True

    #         print('idle')

    def lineReceived(self, line):
        #  print(line)
        if line.startswith("OK MPD"):
            self._event({"changed": "connected"})
        elif line.startswith("OK"):
            #             print('deferred length: %d' % len(self.deferreds))
            self.list_index = 1
            try:
                d = self.deferreds.pop(0)
            except IndexError:
                self.set_idle()
                self._event(self.buff)
                self.buff = {}
                return
            else:
                d.callback(self.buff)
            self.buff = {}
        elif line.startswith("ACK"):
            #             print('deferred length: %d' % len(self.deferreds))
            try:
                d = self.deferreds.pop(0)
            except IndexError:
                self.set_idle()
                self._event({"Error": line.split("}")[1]})
                self.buff = {}
                return
            else:
                d.errback(Exception(line.split("}")[1]))
            self.buff = {}
        else:
            if len(line) > 0:
                k = line.split(":")[0]
                if isinstance(self.buff, list):
                    if k in self.buff[self.list_index]:
                        self.list_index += 1
                        self.buff.append({})
                    self.buff[self.list_index].update({k: " ".join(line.split()[1:])})
                else:
                    if k in self.buff:
                        self.buff = [self.buff]
                        self.list_index = 1
                        self.buff.append({k: " ".join(line.split()[1:])})
                    #                         if str(self.list_index) + k in self.buff:
                    #                             self.list_index += 1
                    #                         self.buff.update(
                    #                             {str(self.list_index) + line.split(':')[0]:
                    #                              ' '.join(line.split()[1:])})
                    else:
                        self.buff.update({k: " ".join(line.split()[1:])})
            return
        if len(self.deferreds) == 0:
            self.set_idle()
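Wiring the protocol to a server with Twisted endpoints, in the same Python 2 style as the example; the host and port are assumptions (6600 is MPD's default), and _event, which the protocol calls but never defines itself, must be supplied by the owner:

from twisted.internet import reactor
from twisted.internet.endpoints import TCP4ClientEndpoint, connectProtocol

proto = MpdProtocol()
proto._event = lambda evt: None       # hook called on server events
endpoint = TCP4ClientEndpoint(reactor, 'localhost', 6600)
d = connectProtocol(endpoint, proto)
d.addCallback(lambda p: p.sendLine('status'))
reactor.run()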
Example #12
class SmsFactory(ClientFactory, Client):
    room = 'NA'
    actions = ('sendsms', 'readsms')

    def __init__(self, event_fct=None):
        self.protocol = serialLineProtocol()
        self.uid = uuid.uuid4()
        self.protocol.factory = self
        self.log = Logger()
        self.first = True
        self.event = event_fct
        self.callback = None
        self.wait = False
        self.response = []
        self.resp_re = re.compile(
            r'^OK|ERROR|(\+CM[ES] ERROR: \d+)|(COMMAND NOT SUPPORT)$')

    def receive(self, line):
        if self.wait:
            if self.resp_re.match(line):
                self.wait = False
                self.response.append(line)
                if line.startswith('ERROR'):
                    self.log.critical('error from Modem: %s' % line)
                    if self.callback:
                        self.callback.errback(self.response)
                else:
                    if self.callback:
                        self.callback.callback(self.response)
                self.response = []
                if self.callback:
                    self.callback = None
            else:
                self.response.append(line)
        elif self.event:
            self.event(line)
        else:
            self.log.debug('unmanaged message from Modem: %s' % line)

    def sendsms(self, recipient, message, callback_fct=None):
        def recipient_set(res):
            self.log.debug('do we have > ? ==> %s' %
                           ('OK' if res == '>' else 'No: ' + res))
            self.callback = defer.Deferred()
            if callback_fct:
                self.callback.addCallback(callback_fct)
            self.wait = True
            self.protocol.send(message + b'\x1a')

        def text_mode(res):
            self.callback = defer.Deferred()
            self.callback.addCallback(recipient_set)
            self.wait = True
            self.protocol.send(b'AT+CMGS="' + recipient.encode() + b'"\r')

        def modem_init(res):
            self.first = False
            self.callback = defer.Deferred()
            self.callback.addCallback(text_mode)
            self.wait = True
            self.protocol.send(b'AT+CMGF=1\r')

        if self.first:
            self.wait = True
            self.callback = defer.Deferred()
            self.callback.addCallback(modem_init)
            self.protocol.send(b'ATZ\r')
        else:
            modem_init('OK')

    def _write(self, txt):
        self.protocol.send(txt.encode())
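sendsms() chains one deferred per modem reply, walking the classic GSM text-mode dialogue; 0x1a (Ctrl-Z) terminates the message body. A usage sketch, assuming serialLineProtocol from Example #8 and pySerial are available; the device path and recipient are illustrative:

# Dialogue walked by sendsms():
#   ATZ                     -> OK     (reset, first call only)
#   AT+CMGF=1               -> OK     (switch modem to text mode)
#   AT+CMGS="+33600000000"  -> '>'    (prompt for the body)
#   Hello\x1a               -> +CMGS: <n>, OK
from twisted.internet import reactor
from twisted.internet.serialport import SerialPort

factory = SmsFactory()
SerialPort(factory.protocol, '/dev/ttyUSB0', reactor, baudrate=115200)
factory.sendsms('+33600000000', b'Hello')
reactor.run()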
Example #13
class BlockchainInterface:
    """
    Interacts with a Solidity compiler and a registry in order to instantiate
    compiled Ethereum contracts with the given web3 provider backend.
    """

    TIMEOUT = 180  # seconds
    NULL_ADDRESS = '0x' + '0' * 40

    _instance = NO_BLOCKCHAIN_CONNECTION.bool_value(False)
    process = NO_PROVIDER_PROCESS.bool_value(False)
    Web3 = Web3

    _contract_factory = Contract

    class InterfaceError(Exception):
        pass

    class NoProvider(InterfaceError):
        pass

    class ConnectionFailed(InterfaceError):
        pass

    class UnknownContract(InterfaceError):
        pass

    def __init__(self,
                 poa: bool = True,
                 provider_process: NuCypherGethProcess = NO_PROVIDER_PROCESS,
                 provider_uri: str = NO_BLOCKCHAIN_CONNECTION,
                 transacting_power: TransactingPower = READ_ONLY_INTERFACE,
                 provider: Web3Providers = NO_BLOCKCHAIN_CONNECTION,
                 registry: EthereumContractRegistry = None):
        """
        A blockchain "network interface"; the interface wraps entirely around
        the bounds of contract operations including compilation, deployment,
        and execution.

         Filesystem          Configuration           Node              Client                  EVM
        ================ ====================== =============== =====================  ===========================

         Solidity Files -- SolidityCompiler ---                  --- HTTPProvider ------ ...
                                               |                |
                                               |                |

                                                 *Blockchain* -- IPCProvider ----- External EVM (geth, parity...)

                                               |      |         |
                                               |      |         |
         Registry File -- ContractRegistry ---        |          ---- TestProvider ----- EthereumTester
                                                      |
                        |                             |                                         |
                        |                             |
                                                                                        PyEVM (Development Chain)
         Runtime Files --                 -------- Blockchain
                                         |
                        |                |             |

         Key Files ------ CharacterConfiguration -------- Agent ... (Contract API)

                        |                |             |
                        |                |
                        |                 ---------- Actor ... (Blockchain-Character API)
                        |
                        |                              |
                        |
         Config File ---                           Character ... (Public API)

                                                       |

                                                     Human


        The Blockchain is the junction of the solidity compiler, a contract registry, and a collection of
        web3 network providers as a means of interfacing with the ethereum blockchain to execute
        or deploy contract code on the network.


        Compiler and Registry Usage
        -----------------------------

        Contracts are freshly re-compiled if an instance of SolidityCompiler is passed;
        otherwise, the registry will read contract data saved to disk, which is used to
        retrieve contract addresses and op-codes. Optionally, a registry instance can be
        passed instead.


        Provider Usage
        ---------------
        https://github.com/ethereum/eth-tester#available-backends


        * HTTP Provider - Web3 HTTP provider, typically JSON RPC 2.0 over HTTP
        * Websocket Provider - Web3 WS provider, typically JSON RPC 2.0 over WS, supply endpoint uri and websocket=True
        * IPC Provider - Web3 File based IPC provider transported over standard I/O
        * Custom Provider - A pre-initialized web3.py provider instance to attach to this interface

        """

        self.log = Logger('Blockchain')
        self.poa = poa
        self.provider_uri = provider_uri
        self._provider = provider
        self._provider_process = provider_process
        self.client = NO_BLOCKCHAIN_CONNECTION
        self.transacting_power = transacting_power
        self.registry = registry
        BlockchainInterface._instance = self

    def __repr__(self):
        r = '{name}({uri})'.format(name=self.__class__.__name__,
                                   uri=self.provider_uri)
        return r

    @classmethod
    def from_dict(cls, payload: dict, **overrides) -> 'BlockchainInterface':

        # Apply overrides
        payload.update({k: v for k, v in overrides.items() if v is not None})

        registry = EthereumContractRegistry(
            registry_filepath=payload['registry_filepath'])
        blockchain = cls(provider_uri=payload['provider_uri'],
                         registry=registry)
        return blockchain

    def to_dict(self) -> dict:
        payload = dict(provider_uri=self.provider_uri,
                       poa=self.poa,
                       registry_filepath=self.registry.filepath)
        return payload
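
    # Hedged round-trip sketch (illustrative, not part of the original
    # example; the URI and filepath below are hypothetical):
    #
    #   payload = {'provider_uri': 'http://localhost:8545',
    #              'registry_filepath': '/tmp/registry.json'}
    #   blockchain = BlockchainInterface.from_dict(payload)
    #   assert blockchain.to_dict()['provider_uri'] == payload['provider_uri']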

    def _configure_registry(self, fetch_registry: bool = True) -> None:
        RegistryClass = EthereumContractRegistry._get_registry_class(
            local=self.client.is_local)
        if fetch_registry:
            registry = RegistryClass.from_latest_publication()
        else:
            registry = RegistryClass()
        self.registry = registry
        self.log.info("Using contract registry {}".format(
            self.registry.filepath))

    @property
    def is_connected(self) -> bool:
        """
        https://web3py.readthedocs.io/en/stable/providers.html#examples-using-automated-detection
        """
        if self.client is NO_BLOCKCHAIN_CONNECTION:
            return False
        return self.client.is_connected

    def disconnect(self) -> None:
        if self._provider_process:
            self._provider_process.stop()
        self._provider_process = NO_PROVIDER_PROCESS
        self._provider = NO_BLOCKCHAIN_CONNECTION
        BlockchainInterface._instance = NO_BLOCKCHAIN_CONNECTION

    @classmethod
    def reconnect(cls, *args, **kwargs) -> 'BlockchainInterface':
        return cls._instance

    def attach_middleware(self):

        # For use with Proof-Of-Authority test-blockchains
        if self.poa is True:
            self.log.debug('Injecting POA middleware at layer 0')
            self.client.inject_middleware(geth_poa_middleware, layer=0)

    def connect(self,
                fetch_registry: bool = True,
                sync_now: bool = False,
                emitter: StdoutEmitter = None):

        # Spawn child process
        if self._provider_process:
            self._provider_process.start()
            provider_uri = self._provider_process.provider_uri(scheme='file')
        else:
            provider_uri = self.provider_uri
            self.log.info(
                f"Using external Web3 Provider '{self.provider_uri}'")

        # Attach Provider
        self._attach_provider(provider=self._provider,
                              provider_uri=provider_uri)
        self.log.info("Connecting to {}".format(self.provider_uri))
        if self._provider is NO_BLOCKCHAIN_CONNECTION:
            raise self.NoProvider(
                "There are no configured blockchain providers")

        # Connect if not connected
        try:
            self.w3 = self.Web3(provider=self._provider)
            self.client = Web3Client.from_w3(w3=self.w3)
        except requests.ConnectionError:  # RPC
            raise self.ConnectionFailed(
                f'Connection Failed - {str(self.provider_uri)} - is RPC enabled?'
            )
        except FileNotFoundError:  # IPC File Protocol
            raise self.ConnectionFailed(
                f'Connection Failed - {str(self.provider_uri)} - is IPC enabled?'
            )
        else:
            self.attach_middleware()

        # Establish contact with NuCypher contracts
        if not self.registry:
            self._configure_registry(fetch_registry=fetch_registry)

        # Wait for chaindata sync
        if sync_now:
            sync_state = self.client.sync()
            if emitter:
                import click
                emitter.echo(
                    f"Syncing: {self.client.chain_name.capitalize()}. Waiting for sync to begin."
                )

                while not len(self.client.peers):
                    emitter.echo("waiting for peers...")
                    time.sleep(5)

                peer_count = len(self.client.peers)
                emitter.echo(
                    f"Found {'an' if peer_count == 1 else peer_count} Ethereum peer{'s' if peer_count > 1 else ''}."
                )

                try:
                    emitter.echo("Beginning sync...")
                    initial_state = next(sync_state)
                except StopIteration:  # will occur if no syncing needs to happen
                    emitter.echo("Local blockchain data is already synced.")
                    return True

                prior_state = initial_state
                total_blocks_to_sync = int(initial_state.get(
                    'highestBlock', 0)) - int(
                        initial_state.get('currentBlock', 0))
                with click.progressbar(length=total_blocks_to_sync,
                                       label="sync progress") as bar:
                    for syncdata in sync_state:
                        if syncdata:
                            blocks_accomplished = int(
                                syncdata['currentBlock']) - int(
                                    prior_state.get('currentBlock', 0))
                            bar.update(blocks_accomplished)
                            prior_state = syncdata
            else:
                try:
                    for syncdata in sync_state:
                        self.client.log.info(
                            f"Syncing {syncdata['currentBlock']}/{syncdata['highestBlock']}"
                        )
                except TypeError:  # it's already synced
                    return True

        return self.is_connected
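
    # Hedged usage sketch: assuming a reachable node at a hypothetical URI,
    # connect() attaches the provider, wraps it in a client, and optionally
    # blocks until chaindata is synced:
    #
    #   interface = BlockchainInterface(provider_uri='http://localhost:8545')
    #   interface.connect(fetch_registry=False, sync_now=False)
    #   assert interface.is_connected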

    @property
    def provider(self) -> Union[IPCProvider, WebsocketProvider, HTTPProvider]:
        return self._provider

    def _attach_provider(self,
                         provider: Web3Providers = None,
                         provider_uri: str = None) -> None:
        """
        https://web3py.readthedocs.io/en/latest/providers.html#providers
        """

        if not provider_uri and not provider:
            raise self.NoProvider("No URI or provider instances supplied.")

        if provider_uri and not provider:
            uri_breakdown = urlparse(provider_uri)

            if uri_breakdown.scheme == 'tester':
                providers = {
                    'pyevm': _get_tester_pyevm,
                    'geth': _get_test_geth_parity_provider,
                    'parity-ethereum': _get_test_geth_parity_provider,
                }
                provider_scheme = uri_breakdown.netloc

            else:
                providers = {
                    'auto': _get_auto_provider,
                    'infura': _get_infura_provider,
                    'ipc': _get_IPC_provider,
                    'file': _get_IPC_provider,
                    'ws': _get_websocket_provider,
                    'http': _get_HTTP_provider,
                    'https': _get_HTTP_provider,
                }
                provider_scheme = uri_breakdown.scheme
            try:
                self._provider = providers[provider_scheme](provider_uri)
            except KeyError:
                raise ValueError(
                    f"{provider_uri} is an invalid or unsupported blockchain provider URI"
                )
            else:
                self.provider_uri = provider_uri or NO_BLOCKCHAIN_CONNECTION
        else:
            self._provider = provider
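
    # Scheme-to-provider mapping examples (all URIs are hypothetical):
    #
    #   'http://localhost:8545'                -> _get_HTTP_provider
    #   'ws://localhost:8546'                  -> _get_websocket_provider
    #   'ipc:///home/user/.ethereum/geth.ipc'  -> _get_IPC_provider
    #   'tester://pyevm'                       -> _get_tester_pyevm  (the netloc selects the backend)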

    def send_transaction(
        self,
        contract_function: ContractFunction,
        sender_address: str,
        payload: dict = None,
    ) -> dict:

        if self.transacting_power is READ_ONLY_INTERFACE:
            raise self.InterfaceError(str(READ_ONLY_INTERFACE))

        #
        # Build
        #

        if not payload:
            payload = {}

        nonce = self.client.w3.eth.getTransactionCount(sender_address)
        payload.update({
            'chainId': int(self.client.net_version),
            'nonce': nonce,
            'from': sender_address,
            'gasPrice': self.client.gas_price,
            # 'gas': 0,  # TODO: Gas Management
        })

        # Get interface name
        deployment = isinstance(contract_function, ContractConstructor)

        try:
            transaction_name = contract_function.fn_name.upper()
        except AttributeError:
            if deployment:
                transaction_name = 'DEPLOY'
            else:
                transaction_name = 'UNKNOWN'

        payload_pprint = dict(payload)
        payload_pprint['from'] = to_checksum_address(payload['from'])
        payload_pprint = ', '.join("{}: {}".format(k, v)
                                   for k, v in payload_pprint.items())
        self.log.debug(f"[TX-{transaction_name}] | {payload_pprint}")

        # Build transaction payload
        try:
            unsigned_transaction = contract_function.buildTransaction(payload)
        except ValidationError as e:
            # TODO: Handle validation failures for gas limits, invalid fields, etc.
            self.log.warn(f"Validation error: {e}")
            raise
        else:
            if deployment:
                self.log.info(
                    f"Deploying contract: {len(unsigned_transaction['data'])} bytes"
                )

        #
        # Broadcast
        #

        signed_raw_transaction = self.transacting_power.sign_transaction(
            unsigned_transaction)
        txhash = self.client.send_raw_transaction(signed_raw_transaction)

        try:
            receipt = self.client.wait_for_receipt(txhash,
                                                   timeout=self.TIMEOUT)
        except TimeExhausted:
            # TODO: Handle transaction timeout
            raise
        else:
            self.log.debug(
                f"[RECEIPT-{transaction_name}] | txhash: {receipt['transactionHash'].hex()}"
            )

        #
        # Confirm
        #

        # Primary check
        deployment_status = receipt.get('status', UNKNOWN_TX_STATUS)
        if deployment_status == 0:
            failure = f"Transaction transmitted, but receipt returned status code 0. " \
                      f"Full receipt: \n {pprint.pformat(receipt, indent=2)}"
            raise self.InterfaceError(failure)

        if deployment_status is UNKNOWN_TX_STATUS:
            self.log.info(
                f"Unknown transaction status for {txhash} (receipt did not contain a status field)"
            )

            # Secondary check TODO: Is this a sensible check?
            tx = self.client.get_transaction(txhash)
            if tx["gas"] == receipt["gasUsed"]:
                raise self.InterfaceError(
                    f"Transaction consumed 100% of transaction gas. "
                    f"Full receipt: \n {pprint.pformat(receipt, indent=2)}")

        return receipt
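
    # Hedged usage sketch; 'token_contract', the address variables, and the
    # already-attached TransactingPower are assumptions:
    #
    #   fn = token_contract.functions.transfer(recipient_address, amount)
    #   receipt = interface.send_transaction(contract_function=fn,
    #                                        sender_address=sender_address)
    #   assert receipt['status'] == 1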

    def get_contract_by_name(
            self,
            name: str,
            proxy_name: str = None,
            use_proxy_address: bool = True) -> Union[Contract, List[tuple]]:
        """
        Instantiate a deployed contract from registry data,
        and assimilate it with its proxy if it is upgradeable,
        or return all registered records if use_proxy_address is False.
        """
        target_contract_records = self.registry.search(contract_name=name)

        if not target_contract_records:
            raise self.UnknownContract(
                f"No such contract records with name {name}.")

        if proxy_name:  # It's upgradeable
            # Lookup proxies; Search for a published proxy that targets this contract record

            proxy_records = self.registry.search(contract_name=proxy_name)

            results = list()
            for proxy_name, proxy_addr, proxy_abi in proxy_records:
                proxy_contract = self.client.w3.eth.contract(
                    abi=proxy_abi,
                    address=proxy_addr,
                    ContractFactoryClass=self._contract_factory)

                # Read this dispatcher's target address from the blockchain
                proxy_live_target_address = proxy_contract.functions.target(
                ).call()
                for target_name, target_addr, target_abi in target_contract_records:

                    if target_addr == proxy_live_target_address:
                        if use_proxy_address:
                            pair = (proxy_addr, target_abi)
                        else:
                            pair = (target_addr, target_abi)
                    else:
                        continue

                    results.append(pair)

            if not results:
                raise self.UnknownContract(
                    f"No registered {proxy_name} deployment targets a {name} record.")

            if len(results) > 1:
                address, abi = results[0]
                message = "Multiple {} deployments are targeting {}".format(
                    proxy_name, address)
                raise self.InterfaceError(message)

            else:
                selected_address, selected_abi = results[0]

        else:  # It's not upgradeable
            if len(target_contract_records) != 1:
                m = "Multiple records registered for non-upgradeable contract {}"
                raise self.InterfaceError(m.format(name))
            _target_contract_name, selected_address, selected_abi = target_contract_records[
                0]

        # Create the contract from selected sources
        unified_contract = self.client.w3.eth.contract(
            abi=selected_abi,
            address=selected_address,
            ContractFactoryClass=self._contract_factory)

        return unified_contract
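
# Hedged end-to-end sketch (not part of the original example): connect and
# resolve a registered contract. The URI and contract name are hypothetical,
# and a populated registry is assumed.
if __name__ == '__main__':
    interface = BlockchainInterface(provider_uri='http://localhost:8545')
    interface.connect(fetch_registry=False, sync_now=False)
    token_contract = interface.get_contract_by_name(name='NuCypherToken')
    print(token_contract.address)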
Example #14
class CharacterConfiguration(BaseConfiguration):
    """
    'Sideways Engagement' of Character classes; a reflection of input parameters.
    """

    VERSION = 1  # bump when static payload scheme changes

    CHARACTER_CLASS = NotImplemented
    DEFAULT_CONTROLLER_PORT = NotImplemented
    DEFAULT_DOMAIN = NetworksInventory.DEFAULT
    DEFAULT_NETWORK_MIDDLEWARE = RestMiddleware
    TEMP_CONFIGURATION_DIR_PREFIX = 'tmp-nucypher'

    # Gas
    DEFAULT_GAS_STRATEGY = 'fast'

    def __init__(self,

                 # Base
                 emitter=None,
                 config_root: str = None,
                 filepath: str = None,

                 # Mode
                 dev_mode: bool = False,
                 federated_only: bool = False,

                 # Identity
                 checksum_address: str = None,
                 crypto_power: CryptoPower = None,

                 # Keyring
                 keyring: NucypherKeyring = None,
                 keyring_root: str = None,

                 # Learner
                 learn_on_same_thread: bool = False,
                 abort_on_learning_error: bool = False,
                 start_learning_now: bool = True,

                 # Network
                 controller_port: int = None,
                 domains: Set[str] = None,  # TODO: Mapping between learning domains and "registry" domains - #1580
                 interface_signature: Signature = None,
                 network_middleware: RestMiddleware = None,

                 # Node Storage
                 known_nodes: set = None,
                 node_storage: NodeStorage = None,
                 reload_metadata: bool = True,
                 save_metadata: bool = True,

                 # Blockchain
                 poa: bool = False,
                 light: bool = False,
                 sync: bool = False,
                 provider_uri: str = None,
                 provider_process=None,
                 gas_strategy: Union[Callable, str] = DEFAULT_GAS_STRATEGY,
                 signer_uri: str = None,

                 # Registry
                 registry: BaseContractRegistry = None,
                 registry_filepath: str = None):

        self.log = Logger(self.__class__.__name__)
        UNINITIALIZED_CONFIGURATION.bool_value(False)

        # Identity
        # NOTE: NodeConfigurations can only be used with Self-Characters
        self.is_me = True
        self.checksum_address = checksum_address

        # Keyring
        self.crypto_power = crypto_power
        self.keyring = keyring or NO_KEYRING_ATTACHED
        self.keyring_root = keyring_root or UNINITIALIZED_CONFIGURATION

        # Contract Registry
        if registry and registry_filepath:
            if registry.filepath != registry_filepath:
                error = f"Inconsistent registry filepaths for '{registry.filepath}' and '{registry_filepath}'."
                raise ValueError(error)
            else:
                self.log.warn(f"Registry and registry filepath were both passed.")
        self.registry = registry or NO_BLOCKCHAIN_CONNECTION.bool_value(False)
        self.registry_filepath = registry_filepath or UNINITIALIZED_CONFIGURATION

        # Blockchain
        self.poa = poa
        self.is_light = light
        self.provider_uri = provider_uri or NO_BLOCKCHAIN_CONNECTION
        self.provider_process = provider_process or NO_BLOCKCHAIN_CONNECTION
        self.signer_uri = signer_uri or NO_BLOCKCHAIN_CONNECTION

        # Learner
        self.federated_only = federated_only
        self.domains = domains or {self.DEFAULT_DOMAIN}
        self.learn_on_same_thread = learn_on_same_thread
        self.abort_on_learning_error = abort_on_learning_error
        self.start_learning_now = start_learning_now
        self.save_metadata = save_metadata
        self.reload_metadata = reload_metadata
        self.known_nodes = known_nodes or set()  # handpicked

        # Configuration
        self.__dev_mode = dev_mode
        self.config_file_location = filepath or UNINITIALIZED_CONFIGURATION
        self.config_root = UNINITIALIZED_CONFIGURATION

        #
        # Federated vs. Blockchain arguments consistency
        #

        #
        # Federated
        #

        if self.federated_only:
            # Check for incompatible values
            blockchain_args = {'filepath': registry_filepath,
                               'poa': poa,
                               'provider_process': provider_process,
                               'provider_uri': provider_uri,
                               'gas_strategy': gas_strategy}
            if any(blockchain_args.values()):
                bad_args = ', '.join(f"{arg}={val}" for arg, val in blockchain_args.items() if val)
                self.log.warn(f"Arguments {bad_args} are incompatible with federated_only. "
                              f"Overridden with a sane default.")

                # Clear decentralized attributes to ensure consistency with a
                # federated configuration.
                self.poa = False
                self.is_light = False
                self.provider_uri = None
                self.provider_process = None
                self.registry_filepath = None
                self.gas_strategy = None

        #
        # Decentralized
        #

        else:
            self.gas_strategy = gas_strategy
            is_initialized = BlockchainInterfaceFactory.is_interface_initialized(provider_uri=self.provider_uri)
            if not is_initialized and provider_uri:
                BlockchainInterfaceFactory.initialize_interface(provider_uri=self.provider_uri,
                                                                poa=self.poa,
                                                                light=self.is_light,
                                                                provider_process=self.provider_process,
                                                                sync=sync,
                                                                emitter=emitter,
                                                                gas_strategy=gas_strategy)
            else:
                self.log.warn(f"Using existing blockchain interface connection ({self.provider_uri}).")

            if not self.registry:
                # TODO: These two code blocks are untested.
                if not self.registry_filepath:  # TODO: Registry URI  (goerli://speedynet.json) :-)
                    self.log.info(f"Fetching latest registry from source.")
                    self.registry = InMemoryContractRegistry.from_latest_publication(network=list(self.domains)[0])  # TODO: #1580
                else:
                    self.registry = LocalContractRegistry(filepath=self.registry_filepath)
                    self.log.info(f"Using local registry ({self.registry}).")

        if dev_mode:
            self.__temp_dir = UNINITIALIZED_CONFIGURATION
            self.__setup_node_storage()
            self.initialize(password=DEVELOPMENT_CONFIGURATION)
        else:
            self.__temp_dir = LIVE_CONFIGURATION
            self.config_root = config_root or self.DEFAULT_CONFIG_ROOT
            self._cache_runtime_filepaths()
            self.__setup_node_storage(node_storage=node_storage)

        # Network
        self.controller_port = controller_port or self.DEFAULT_CONTROLLER_PORT
        self.network_middleware = network_middleware or self.DEFAULT_NETWORK_MIDDLEWARE(registry=self.registry)
        self.interface_signature = interface_signature

        super().__init__(filepath=self.config_file_location, config_root=self.config_root)

    def __call__(self, **character_kwargs):
        return self.produce(**character_kwargs)

    def update(self, **kwargs) -> None:
        """
        A facility for updating existing attributes on existing configuration instances.

        Warning: This method allows mutation and may result in an inconsistent configuration.
        """
        return super().update(modifier=self.checksum_address, filepath=self.config_file_location, **kwargs)

    @classmethod
    def generate(cls, password: str, *args, **kwargs):
        """Shortcut: Hook-up a new initial installation and write configuration file to the disk"""
        node_config = cls(dev_mode=False, *args, **kwargs)
        node_config.initialize(password=password)
        node_config.to_configuration_file()
        return node_config

    def cleanup(self) -> None:
        if self.__dev_mode:
            self.__temp_dir.cleanup()

    @property
    def dev_mode(self) -> bool:
        return self.__dev_mode

    def __setup_node_storage(self, node_storage=None) -> None:
        if self.dev_mode:
            node_storage = ForgetfulNodeStorage(registry=self.registry, federated_only=self.federated_only)
        elif not node_storage:
            node_storage = LocalFileBasedNodeStorage(registry=self.registry,
                                                     config_root=self.config_root,
                                                     federated_only=self.federated_only)
        self.node_storage = node_storage

    def forget_nodes(self) -> None:
        self.node_storage.clear()
        message = "Removed all stored node node metadata and certificates"
        self.log.debug(message)

    def destroy(self) -> None:
        """Parse a node configuration and remove all associated files from the filesystem"""
        self.attach_keyring()
        self.keyring.destroy()
        os.remove(self.config_file_location)

    def generate_parameters(self, **overrides) -> dict:
        """
        Warning: This method allows mutation and may result in an inconsistent configuration.
        """
        merged_parameters = {**self.static_payload(), **self.dynamic_payload, **overrides}
        non_init_params = ('config_root',
                           'poa',
                           'light',
                           'provider_uri',
                           'registry_filepath',
                           'gas_strategy',
                           'signer_uri')
        character_init_params = filter(lambda t: t[0] not in non_init_params, merged_parameters.items())
        return dict(character_init_params)

    def produce(self, **overrides) -> CHARACTER_CLASS:
        """Initialize a new character instance and return it."""
        merged_parameters = self.generate_parameters(**overrides)
        character = self.CHARACTER_CLASS(**merged_parameters)
        return character
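
    # Hedged subclass sketch (illustrative): a concrete configuration binds
    # CHARACTER_CLASS, and produce() instantiates it from the merged payloads:
    #
    #   class UrsulaConfiguration(CharacterConfiguration):
    #       CHARACTER_CLASS = Ursula
    #       DEFAULT_CONTROLLER_PORT = 9151
    #
    #   config = UrsulaConfiguration(dev_mode=True, federated_only=True)
    #   ursula = config.produce()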

    @classmethod
    def assemble(cls, filepath: str = None, **overrides) -> dict:
        """
        Warning: This method allows mutation and may result in an inconsistent configuration.
        """
        payload = cls._read_configuration_file(filepath=filepath)
        node_storage = cls.load_node_storage(storage_payload=payload['node_storage'],
                                             federated_only=payload['federated_only'])
        domains = set(payload['domains'])

        # Assemble
        payload.update(dict(node_storage=node_storage, domains=domains))
        # Filter out None values from **overrides to detect, well, overrides...
        # Acts as a shim for optional CLI flags.
        overrides = {k: v for k, v in overrides.items() if v is not None}
        payload = {**payload, **overrides}
        return payload

    @classmethod
    def from_configuration_file(cls,
                                filepath: str = None,
                                provider_process=None,
                                **overrides  # < ---- Inlet for CLI Flags
                                ) -> 'CharacterConfiguration':
        """Initialize a CharacterConfiguration from a JSON file."""
        filepath = filepath or cls.default_filepath()
        assembled_params = cls.assemble(filepath=filepath, **overrides)
        try:
            node_configuration = cls(filepath=filepath, provider_process=provider_process, **assembled_params)
        except TypeError as e:
            raise cls.ConfigurationError(e)
        return node_configuration

    def validate(self) -> bool:

        # Top-level
        if not os.path.exists(self.config_root):
            raise self.ConfigurationError(f'No configuration directory found at {self.config_root}.')

        # Sub-paths
        filepaths = self.runtime_filepaths
        for field, path in filepaths.items():
            if path and not os.path.exists(path):
                message = 'Missing configuration file or directory: {}.'
                if 'registry' in path:
                    message += ' Did you mean to pass --federated-only?'
                raise CharacterConfiguration.InvalidConfiguration(message.format(path))
        return True

    def static_payload(self) -> dict:
        """Exported static configuration values for initializing Ursula"""

        payload = dict(

            # Identity
            federated_only=self.federated_only,
            checksum_address=self.checksum_address,
            keyring_root=self.keyring_root,

            # Behavior
            domains=list(self.domains),  # From Set
            learn_on_same_thread=self.learn_on_same_thread,
            abort_on_learning_error=self.abort_on_learning_error,
            start_learning_now=self.start_learning_now,
            save_metadata=self.save_metadata,
            node_storage=self.node_storage.payload(),
        )

        # Optional values (mode)
        if not self.federated_only:
            if self.provider_uri:
                if not self.signer_uri:
                    self.signer_uri = self.provider_uri
                payload.update(dict(provider_uri=self.provider_uri,
                                    poa=self.poa,
                                    light=self.is_light,
                                    signer_uri=self.signer_uri))
            if self.registry_filepath:
                payload.update(dict(registry_filepath=self.registry_filepath))

            # Gas Price
            payload.update(dict(gas_strategy=self.gas_strategy))

        # Merge with base payload
        base_payload = super().static_payload()
        base_payload.update(payload)

        return base_payload

    @property  # TODO: Graduate to a method and "derive" dynamic from static payload.
    def dynamic_payload(self) -> dict:
        """Exported dynamic configuration values for initializing Ursula"""
        payload = dict()
        if not self.federated_only:
            payload.update(dict(registry=self.registry, signer=Signer.from_signer_uri(self.signer_uri)))

        payload.update(dict(network_middleware=self.network_middleware or self.DEFAULT_NETWORK_MIDDLEWARE(),
                            known_nodes=self.known_nodes,
                            node_storage=self.node_storage,
                            crypto_power_ups=self.derive_node_power_ups()))
        return payload

    def generate_filepath(self, filepath: str = None, modifier: str = None, override: bool = False) -> str:
        modifier = modifier or self.checksum_address
        filepath = super().generate_filepath(filepath=filepath, modifier=modifier, override=override)
        return filepath

    @property
    def runtime_filepaths(self) -> dict:
        filepaths = dict(config_root=self.config_root,
                         keyring_root=self.keyring_root,
                         registry_filepath=self.registry_filepath)
        return filepaths

    @classmethod
    def generate_runtime_filepaths(cls, config_root: str) -> dict:
        """Dynamically generate paths based on configuration root directory"""
        filepaths = dict(config_root=config_root,
                         config_file_location=os.path.join(config_root, cls.generate_filename()),
                         keyring_root=os.path.join(config_root, 'keyring'))
        return filepaths
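
    # For example (the root path is hypothetical):
    #
    #   CharacterConfiguration.generate_runtime_filepaths('/home/user/.nucypher')
    #   # -> {'config_root': '/home/user/.nucypher',
    #   #     'config_file_location': '/home/user/.nucypher/<generated filename>',
    #   #     'keyring_root': '/home/user/.nucypher/keyring'}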

    def _cache_runtime_filepaths(self) -> None:
        """Generate runtime filepaths and cache them on the config object"""
        filepaths = self.generate_runtime_filepaths(config_root=self.config_root)
        for field, filepath in filepaths.items():
            if getattr(self, field) is UNINITIALIZED_CONFIGURATION:
                setattr(self, field, filepath)

    def attach_keyring(self, checksum_address: str = None, *args, **kwargs) -> None:
        account = checksum_address or self.checksum_address
        if not account:
            raise self.ConfigurationError("No account specified to unlock keyring")
        if self.keyring is not NO_KEYRING_ATTACHED:
            if self.keyring.checksum_address != account:
                raise self.ConfigurationError("There is already a keyring attached to this configuration.")
            return
        self.keyring = NucypherKeyring(keyring_root=self.keyring_root, account=account, *args, **kwargs)

    def derive_node_power_ups(self) -> List[CryptoPowerUp]:
        power_ups = list()
        if self.is_me and not self.dev_mode:
            for power_class in self.CHARACTER_CLASS._default_crypto_powerups:
                power_up = self.keyring.derive_crypto_power(power_class)
                power_ups.append(power_up)
        return power_ups

    def initialize(self, password: str) -> str:
        """Initialize a new configuration and write installation files to disk."""

        # Development
        if self.dev_mode:
            self.__temp_dir = TemporaryDirectory(prefix=self.TEMP_CONFIGURATION_DIR_PREFIX)
            self.config_root = self.__temp_dir.name

        # Persistent
        else:
            self._ensure_config_root_exists()
            self.write_keyring(password=password)

        self._cache_runtime_filepaths()
        self.node_storage.initialize()

        # Validate
        if not self.__dev_mode:
            self.validate()

        # Success
        message = "Created nucypher installation files at {}".format(self.config_root)
        self.log.debug(message)
        return self.config_root

    def write_keyring(self, password: str, checksum_address: str = None, **generation_kwargs) -> NucypherKeyring:

        if self.federated_only:
            checksum_address = FEDERATED_ADDRESS

        elif not checksum_address:

            # Note: It is assumed the blockchain interface is not yet connected.
            if self.provider_process:

                # Generate Geth's "datadir"
                if not os.path.exists(self.provider_process.data_dir):
                    os.mkdir(self.provider_process.data_dir)

                # Get or create wallet address
                if not self.checksum_address:
                    self.checksum_address = self.provider_process.ensure_account_exists(password=password)
                elif self.checksum_address not in self.provider_process.accounts():
                    raise self.ConfigurationError(f'Unknown Account {self.checksum_address}')

            elif not self.checksum_address:
                raise self.ConfigurationError(f'No checksum address provided for decentralized configuration.')

            checksum_address = self.checksum_address

        self.keyring = NucypherKeyring.generate(password=password,
                                                keyring_root=self.keyring_root,
                                                checksum_address=checksum_address,
                                                **generation_kwargs)

        if self.federated_only:
            self.checksum_address = self.keyring.checksum_address

        return self.keyring

    @classmethod
    def load_node_storage(cls, storage_payload: dict, federated_only: bool):
        from nucypher.config.storages import NodeStorage
        node_storage_subclasses = {storage._name: storage for storage in NodeStorage.__subclasses__()}
        storage_type = storage_payload[NodeStorage._TYPE_LABEL]
        storage_class = node_storage_subclasses[storage_type]
        node_storage = storage_class.from_payload(payload=storage_payload, federated_only=federated_only)
        return node_storage
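
# Hedged usage sketch (illustrative): assuming a concrete subclass such as
# UrsulaConfiguration, generate() writes installation files plus a JSON
# configuration file that from_configuration_file() can later restore:
#
#   config = UrsulaConfiguration.generate(password='a strong passphrase',
#                                         federated_only=True)
#   restored = UrsulaConfiguration.from_configuration_file()
#   ursula = restored.produce()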
Example #15
class TESSBaseProtocol(LineOnlyReceiver):

    SOLICITED_RESPONSES = [
        {
            'name': 'firmware',
            'pattern': r'^Compiled (.+)',
        },
        {
            'name': 'mac',
            'pattern': r'^MAC: ([0-9A-Za-z]{12})',
        },
        {
            'name': 'zp',
            'pattern': r'^Actual CI: (\d{1,2}.\d{1,2})',
        },
        {
            'name': 'written_zp',
            'pattern': r'^New CI: (\d{1,2}.\d{1,2})',
        },
    ]

    SOLICITED_PATTERNS = []  # Filled in by subclasses

    # So that we can patch it in tests with Clock.callLater ...
    callLater = reactor.callLater

    # -------------------------
    # Twisted Line Receiver API
    # -------------------------

    def __init__(self, namespace):
        '''Sets up logging and request state (the default line delimiter is kept)'''
        # LineOnlyReceiver.delimiter = b'\n'
        self.log = Logger(namespace=namespace)
        self._consumer = None
        self._paused = True
        self._stopped = False
        self.write_deferred = None
        self.read_deferred = None
        self.write_response = None
        self.read_response = None

    def connectionMade(self):
        self.log.debug("connectionMade()")

    def lineReceived(self, line):
        now = datetime.datetime.utcnow().replace(
            microsecond=0) + datetime.timedelta(seconds=0.5)
        line = line.decode('latin_1')  # from bytearray to string
        self.log.info("<== {label:6} [{l:02d}] {line}",
                      l=len(line),
                      label=self.label,
                      line=line)
        handled = self._handleSolicitedResponse(line, now)
        if handled:
            self._triggerCallbacks()
            return
        handled, reading = self._handleUnsolicitedResponse(line, now)
        if handled:
            self._consumer.write(reading)

    # -----------------------
    # IPushProducer interface
    # -----------------------

    def stopProducing(self):
        """
        Stop producing data.
        """
        self._stopped = True

    def pauseProducing(self):
        """
        Pause producing data.
        """
        self._paused = True

    def resumeProducing(self):
        """
        Resume producing data.
        """
        self._paused = False

    def registerConsumer(self, consumer):
        '''
        This is not really part of the IPushProducer interface
        '''
        self._consumer = IConsumer(consumer)

    # =================
    # TESS Protocol API
    # =================

    def setContext(self, context):
        pass

    def writeZeroPoint(self, zero_point):
        '''
        Writes Zero Point to the device. 
        Returns a Deferred
        '''
        line = 'CI{0:04d}'.format(int(round(zero_point * 100, 2)))
        self.log.info("==> {label:6} [{l:02d}] {line}",
                      label=self.label,
                      l=len(line),
                      line=line)
        self.sendLine(line.encode('ascii'))
        self.write_deferred = defer.Deferred()
        self.write_deferred.addTimeout(2, reactor)
        self.write_response = {}
        return self.write_deferred

    def readPhotometerInfo(self):
        '''
        Reads Info from the device.
        Returns a Deferred
        '''
        line = '?'
        self.log.info("==> {label:6} [{l:02d}] {line}",
                      label=self.label,
                      l=len(line),
                      line=line)
        self.sendLine(line.encode('ascii'))
        self.read_deferred = defer.Deferred()
        self.read_deferred.addTimeout(2, reactor)
        self.cnt = 0
        self.read_response = {}
        return self.read_deferred
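
    # Hedged usage sketch: both request methods return a Deferred that fires
    # with the parsed response dictionary, or errs out via the 2-second
    # timeout armed above:
    #
    #   d = protocol.readPhotometerInfo()
    #   d.addCallback(lambda info: print(info['mac'], info['zp']))
    #   d.addErrback(lambda failure: print('read failed:', failure))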

    # --------------
    # Helper methods
    # --------------

    def _match_solicited(self, line):
        '''Returns matched command descriptor or None'''
        for regexp in self.SOLICITED_PATTERNS:
            matchobj = regexp.search(line)
            if matchobj:
                i = self.SOLICITED_PATTERNS.index(regexp)
                self.log.debug("matched {pattern}",
                               pattern=self.SOLICITED_RESPONSES[i]['name'])
                return self.SOLICITED_RESPONSES[i], matchobj
        return None, None

    def _triggerCallbacks(self):
        # trigger pending callbacks
        if self.read_deferred and self.cnt == 4:
            self.read_deferred.callback(self.read_response)
            self.read_deferred = None
            self.cnt = 0

        if self.write_deferred and 'zp' in self.write_response:
            self.write_deferred.callback(self.write_response)
            self.write_deferred = None

    def _handleSolicitedResponse(self, line, tstamp):
        '''
        Handle solicited responses from zptess.
        Returns True if handled, False otherwise
        '''
        sr, matchobj = self._match_solicited(line)
        if not sr:
            return False
        handled = True
        if sr['name'] == 'name':
            self.read_response['tstamp'] = tstamp
            self.read_response['name'] = str(matchobj.group(1))
            self.cnt += 1
        elif sr['name'] == 'mac':
            self.read_response['tstamp'] = tstamp
            self.read_response['mac'] = format_mac(matchobj.group(1))
            self.cnt += 1
        elif sr['name'] == 'firmware':
            self.read_response['tstamp'] = tstamp
            self.read_response['firmware'] = str(matchobj.group(1))
            self.cnt += 1
        elif sr['name'] == 'zp':
            self.read_response['tstamp'] = tstamp
            self.read_response['zp'] = float(matchobj.group(1))
            self.cnt += 1
        elif sr['name'] == 'written_zp':
            self.write_response['tstamp'] = tstamp
            self.write_response['zp'] = float(matchobj.group(1))
        else:
            handled = False
        return handled

    def _handleUnsolicitedResponse(self, line, tstamp):
        '''
        Handle unsolicited responses from zptess.
        Returns a (handled, reading) tuple; handled is True if the line was consumed
        '''
        if self._paused or self._stopped:
            #self.log.debug("Producer either paused({p}) or stopped({s})", p=self._paused, s=self._stopped)
            return False, None
        try:
            reading = json.loads(line)
        except Exception:
            return False, None
        else:
            reading['tstamp'] = tstamp
            return True, reading
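
# Hedged subclass sketch (not part of the original example): concrete
# protocols are expected to compile SOLICITED_PATTERNS from the descriptors
# above, keeping the same order so _match_solicited() can index back into
# SOLICITED_RESPONSES. The class name and label are hypothetical.
import re

class TESS4Protocol(TESSBaseProtocol):
    label = 'TESS4'
    SOLICITED_PATTERNS = [re.compile(descriptor['pattern'])
                          for descriptor in TESSBaseProtocol.SOLICITED_RESPONSES]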
Example #16
class NucypherClickConfig:
    __LOG_TO_SENTRY_ENVVAR = "NUCYPHER_SENTRY_LOGS"
    __NUCYPHER_SENTRY_ENDPOINT = "https://[email protected]/1310685"
    _KEYRING_PASSPHRASE_ENVVAR = "NUCYPHER_KEYRING_PASSPHRASE"

    # Set to False to completely opt-out of sentry reporting
    log_to_sentry = True  # TODO: Use envvar
    log_to_file = True  # TODO: Use envvar

    # Pending Configuration Named Tuple
    PendingConfigurationDetails = collections.namedtuple(
        'PendingConfigurationDetails',
        'passphrase wallet signing tls skip_keys save_file')

    def __init__(self):

        #
        # Logging
        #

        if self.log_to_sentry:
            import sentry_sdk
            import logging

            sentry_logging = LoggingIntegration(
                level=logging.INFO,  # Capture info and above as breadcrumbs
                event_level=logging.DEBUG  # Send debug logs as events
            )
            sentry_sdk.init(dsn=self.__NUCYPHER_SENTRY_ENDPOINT,
                            integrations=[sentry_logging],
                            release=nucypher.__version__)

            globalLogPublisher.addObserver(logToSentry)

        if self.log_to_file is True:
            globalLogPublisher.addObserver(getTextFileObserver())

        self.log = Logger(self.__class__.__name__)

        # Node Configuration
        self.node_configuration = NO_NODE_CONFIGURATION
        self.dev = NO_NODE_CONFIGURATION
        self.federated_only = NO_NODE_CONFIGURATION
        self.config_root = NO_NODE_CONFIGURATION
        self.config_file = NO_NODE_CONFIGURATION

        # Blockchain
        self.deployer = NO_BLOCKCHAIN_CONNECTION
        self.compile = NO_BLOCKCHAIN_CONNECTION
        self.poa = NO_BLOCKCHAIN_CONNECTION
        self.blockchain = NO_BLOCKCHAIN_CONNECTION
        self.provider_uri = NO_BLOCKCHAIN_CONNECTION
        self.registry_filepath = NO_BLOCKCHAIN_CONNECTION
        self.accounts = NO_BLOCKCHAIN_CONNECTION

        # Agency
        self.token_agent = NO_BLOCKCHAIN_CONNECTION
        self.miner_agent = NO_BLOCKCHAIN_CONNECTION
        self.policy_agent = NO_BLOCKCHAIN_CONNECTION

    def get_node_configuration(self,
                               configuration_class=UrsulaConfiguration,
                               **overrides):
        if self.dev:
            node_configuration = configuration_class(
                temp=self.dev,
                auto_initialize=False,
                federated_only=self.federated_only)
        else:
            try:
                filepath = self.config_file or UrsulaConfiguration.DEFAULT_CONFIG_FILE_LOCATION
                node_configuration = configuration_class.from_configuration_file(
                    filepath=filepath)
            except FileNotFoundError:
                if self.config_root:
                    node_configuration = configuration_class(
                        temp=False,
                        config_root=self.config_root,
                        auto_initialize=False)
                else:
                    node_configuration = configuration_class(
                        federated_only=self.federated_only,
                        auto_initialize=False,
                        **overrides)
            else:
                click.secho("Reading Ursula node configuration file {}".format(
                    filepath),
                            fg='blue')

        self.node_configuration = node_configuration

    def connect_to_blockchain(self):
        if self.federated_only:
            raise NodeConfiguration.ConfigurationError(
                "Cannot connect to blockchain in federated mode")
        if self.deployer:
            self.registry_filepath = NodeConfiguration.REGISTRY_SOURCE
        if self.compile:
            click.confirm("Compile solidity source?", abort=True)
        self.blockchain = Blockchain.connect(provider_uri=self.provider_uri,
                                             deployer=self.deployer,
                                             compile=self.compile)
        if self.poa:
            w3 = self.blockchain.interface.w3
            w3.middleware_stack.inject(geth_poa_middleware, layer=0)
        self.accounts = self.blockchain.interface.w3.eth.accounts
        self.log.debug("CLI established connection to provider {}".format(
            self.blockchain.interface.provider_uri))

    def connect_to_contracts(self) -> None:
        """Initialize contract agency and set them on config"""
        self.token_agent = NucypherTokenAgent(blockchain=self.blockchain)
        self.miner_agent = MinerAgent(blockchain=self.blockchain)
        self.policy_agent = PolicyAgent(blockchain=self.blockchain)
        self.log.debug("CLI established connection to nucypher contracts")

    def create_account(self, passphrase: str = None) -> str:
        """Creates a new local or hosted ethereum wallet"""
        choice = click.prompt("Create a new Hosted or Local account?",
                              default='hosted',
                              type=click.STRING).strip().lower()
        if choice not in ('hosted', 'local'):
            click.echo("Invalid Input")
            raise click.Abort()

        if not passphrase:
            message = "Enter a passphrase to encrypt your wallet's private key"
            passphrase = click.prompt(message,
                                      hide_input=True,
                                      confirmation_prompt=True)

        if choice == 'local':
            keyring = NucypherKeyring.generate(
                passphrase=passphrase,
                keyring_root=self.node_configuration.keyring_dir,
                encrypting=False,
                wallet=True)
            new_address = keyring.checksum_address
        elif choice == 'hosted':
            new_address = self.blockchain.interface.w3.personal.newAccount(
                passphrase)
        else:
            raise click.BadParameter(
                "Invalid choice; Options are hosted or local.")
        return new_address

    def _collect_pending_configuration_details(
            self,
            ursula: bool = False,
            force: bool = False,
            rest_host=None) -> PendingConfigurationDetails:

        # Defaults
        passphrase = None
        skip_all_key_generation, generate_wallet = False, False
        generate_encrypting_keys, generate_tls_keys, save_node_configuration_file = True, True, True

        if ursula:
            if not self.federated_only:  # Wallet
                generate_wallet = click.confirm(
                    "Do you need to generate a new wallet to use for staking?",
                    default=False)

                if not generate_wallet:  # I'll take that as a no...
                    self.federated_only = True  # TODO: Without a wallet...
                    #  let's understand this to be a "federated configuration"

            if generate_tls_keys or force:
                if not force and not rest_host:
                    rest_host = click.prompt(
                        "Enter Node's Public IPv4 Address", type=IPV4_ADDRESS)

                self.node_configuration.rest_host = rest_host

        if not force:  # Signing / Encrypting
            if not any((generate_wallet, generate_tls_keys,
                        generate_encrypting_keys)):
                skip_all_key_generation = click.confirm(
                    "Skip all key generation (Provide custom configuration file)?"
                )

        if not skip_all_key_generation:
            passphrase = os.environ.get(self._KEYRING_PASSPHRASE_ENVVAR)
            if not passphrase:
                passphrase = click.prompt(
                    "Enter a passphrase to encrypt your keyring",
                    hide_input=True,
                    confirmation_prompt=True)

        details = self.PendingConfigurationDetails(
            passphrase=passphrase,
            wallet=generate_wallet,
            signing=generate_encrypting_keys,
            tls=generate_tls_keys,
            skip_keys=skip_all_key_generation,
            save_file=save_node_configuration_file)
        return details

    def create_new_configuration(self,
                                 ursula: bool = False,
                                 force: bool = False,
                                 rest_host: str = None,
                                 no_registry: bool = False):

        if force:
            click.secho("Force is enabled - Using defaults", fg='yellow')
        if self.dev:
            click.secho("Using temporary storage area", fg='blue')

        if not no_registry and not self.federated_only:
            registry_source = self.node_configuration.REGISTRY_SOURCE
            if not os.path.isfile(registry_source):
                click.echo(
                    "Seed contract registry does not exist at path {}.  "
                    "Use --no-registry to skip.".format(registry_source))
                raise click.Abort()

        if self.config_root:  # Custom installation location
            self.node_configuration.config_root = self.config_root
        self.node_configuration.federated_only = self.federated_only

        try:
            pending_config = self._collect_pending_configuration_details(
                force=force, ursula=ursula, rest_host=rest_host)
            new_installation_path = self.node_configuration.initialize(
                passphrase=pending_config.passphrase,
                wallet=pending_config.wallet,
                encrypting=pending_config.signing,
                tls=pending_config.tls,
                no_registry=no_registry,
                no_keys=pending_config.skip_keys,
                host=rest_host)
            if not pending_config.skip_keys:
                click.secho("Generated new keys at {}".format(
                    self.node_configuration.keyring_dir),
                            fg='blue')
        except NodeConfiguration.ConfigurationError as e:
            click.secho(str(e), fg='red')
            raise click.Abort()
        else:
            click.secho("Created nucypher installation files at {}".format(
                new_installation_path),
                        fg='green')
            if pending_config.save_file is True:
                configuration_filepath = self.node_configuration.to_configuration_file(
                    filepath=self.config_file)
                click.secho("Saved node configuration file {}".format(
                    configuration_filepath),
                            fg='green')
                if ursula:
                    click.secho(
                        "\nTo run an Ursula node from the "
                        "default configuration filepath run 'nucypher ursula run'\n"
                    )

    def forget_nodes(self) -> None:
        def __destroy_dir_contents(path):
            for file in os.listdir(path):
                file_path = os.path.join(path, file)
                if os.path.isfile(file_path):
                    os.unlink(file_path)

        click.confirm("Remove all known node data?", abort=True)
        certificates_dir = self.node_configuration.known_certificates_dir
        metadata_dir = os.path.join(self.node_configuration.known_nodes_dir,
                                    'metadata')

        __destroy_dir_contents(certificates_dir)
        __destroy_dir_contents(metadata_dir)
        click.secho("Removed all stored node node metadata and certificates")

    def destroy_configuration(self) -> None:
        if self.dev:
            raise NodeConfiguration.ConfigurationError(
                "Cannot destroy a temporary node configuration")
        click.confirm('''
*Permanently and irreversibly delete all* nucypher files including:
  - Private and Public Keys
  - Known Nodes
  - TLS certificates
  - Node Configurations
  - Log Files

  Delete {} and continue?'''.format(self.node_configuration.config_root),
                      abort=True)

        shutil.rmtree(USER_LOG_DIR)
        shutil.rmtree(self.node_configuration.config_root, ignore_errors=True)
        click.secho("Deleted configuration files at {}".format(
            self.node_configuration.config_root),
                    fg='blue')
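
# Hedged CLI-flow sketch (illustrative; in the original tool this object is
# typically created by a click group callback, with its attributes set from
# command-line flags before the connection methods run):
#
#   config = NucypherClickConfig()
#   config.federated_only = False
#   config.provider_uri = 'http://localhost:8545'
#   config.get_node_configuration()
#   config.connect_to_blockchain()
#   config.connect_to_contracts()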
Example #17
class Felix(Character, NucypherTokenActor):
    """
    A NuCypher ERC20 faucet / Airdrop scheduler.

    Felix is a web application that gives NuCypher *testnet* tokens to registered addresses
    with a scheduled reduction of disbursement amounts, and an HTTP endpoint
    for handling new address registration.

    The main goal of Felix is to provide a source of testnet tokens for
    research and the development of production-ready nucypher dApps.
    """

    _default_crypto_powerups = [SigningPower]

    # Intervals
    DISTRIBUTION_INTERVAL = 60  # seconds
    DISBURSEMENT_INTERVAL = 24 * 365  # only distribute tokens to the same address once each YEAR.
    STAGING_DELAY = 10  # seconds

    # Disbursement
    BATCH_SIZE = 10  # transactions
    MULTIPLIER = Decimal('0.9')  # 10% reduction of the previous disbursement
    # (not relevant until the disbursement interval declared above has passed)
    MINIMUM_DISBURSEMENT = int(1e18)  # NuNits (1 NU)
    ETHER_AIRDROP_AMOUNT = int(1e17)  # Wei (.1 ether)
    MAX_INDIVIDUAL_REGISTRATIONS = 3  # Registration Limit
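
    # Worked example of the intended decay (illustrative numbers): with
    # MULTIPLIER = 0.9, successive yearly disbursements to the same address
    # shrink geometrically (e.g. 1000 NU -> 900 NU -> 810 NU -> ...),
    # presumably floored at MINIMUM_DISBURSEMENT (1 NU).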

    # Node Discovery
    LEARNING_TIMEOUT = 30  # seconds
    _SHORT_LEARNING_DELAY = 60  # seconds
    _LONG_LEARNING_DELAY = 120  # seconds
    _ROUNDS_WITHOUT_NODES_AFTER_WHICH_TO_SLOW_DOWN = 1

    # Twisted
    _CLOCK = reactor
    _AIRDROP_QUEUE = dict()

    class NoDatabase(RuntimeError):
        pass

    def __init__(self,
                 db_filepath: str,
                 rest_host: str,
                 rest_port: int,
                 client_password: str = None,
                 crash_on_error: bool = False,
                 distribute_ether: bool = True,
                 registry: BaseContractRegistry = None,
                 *args,
                 **kwargs):

        # Character
        super().__init__(registry=registry, *args, **kwargs)
        self.log = Logger(f"felix-{self.checksum_address[-6::]}")

        # Network
        self.rest_port = rest_port
        self.rest_host = rest_host
        self.rest_app = NOT_RUNNING
        self.crash_on_error = crash_on_error

        # Database
        self.db_filepath = db_filepath
        self.db = NO_DATABASE_AVAILABLE
        self.db_engine = create_engine(f'sqlite:///{self.db_filepath}',
                                       convert_unicode=True)

        # Blockchain
        transacting_power = TransactingPower(password=client_password,
                                             account=self.checksum_address,
                                             cache=True)
        self._crypto_power.consume_power_up(transacting_power)

        self.token_agent = ContractAgency.get_agent(NucypherTokenAgent,
                                                    registry=registry)
        self.blockchain = self.token_agent.blockchain
        self.reserved_addresses = [self.checksum_address, NULL_ADDRESS]

        # Update reserved addresses with deployed contracts
        existing_entries = list(registry.enrolled_addresses)
        self.reserved_addresses.extend(existing_entries)

        # Distribution
        self.__distributed = 0  # Track NU Output
        self.__airdrop = 0  # Track Batch
        self.__disbursement = 0  # Track Quantity
        self._distribution_task = LoopingCall(f=self.airdrop_tokens)
        self._distribution_task.clock = self._CLOCK
        self.start_time = NOT_RUNNING

        self.economics = EconomicsFactory.get_economics(registry=registry)
        self.MAXIMUM_DISBURSEMENT = self.economics.maximum_allowed_locked
        self.INITIAL_DISBURSEMENT = self.economics.minimum_allowed_locked * 3

        # Optionally send ether with each token transaction
        self.distribute_ether = distribute_ether
        # Banner
        self.log.info(FELIX_BANNER.format(self.checksum_address))

    def __repr__(self):
        class_name = self.__class__.__name__
        r = f'{class_name}(checksum_address={self.checksum_address}, db_filepath={self.db_filepath})'
        return r

    def make_web_app(self):
        from flask import request
        from flask_sqlalchemy import SQLAlchemy

        # WSGI/Flask Service
        short_name = bytes(self.stamp).hex()[:6]
        self.rest_app = Flask(f"faucet-{short_name}",
                              template_folder=TEMPLATES_DIR)
        self.rest_app.config[
            'SQLALCHEMY_DATABASE_URI'] = f'sqlite:///{self.db_filepath}'
        self.rest_app.config['MAX_CONTENT_LENGTH'] = MAX_UPLOAD_CONTENT_LENGTH

        try:
            self.rest_app.secret_key = sha256(
                os.environ['NUCYPHER_FELIX_DB_SECRET'].encode())  # uses envvar
        except KeyError:
            raise OSError(
                "The 'NUCYPHER_FELIX_DB_SECRET' is not set.  Export your application secret and try again."
            )

        # Database
        self.db = SQLAlchemy(self.rest_app)

        # Database Tables
        class Recipient(self.db.Model):
            """
            The one and only table in Felix's database; Used to track recipients and airdrop metadata.
            """

            __tablename__ = 'recipient'

            id = self.db.Column(self.db.Integer, primary_key=True)
            address = self.db.Column(self.db.String, nullable=False)
            joined = self.db.Column(self.db.DateTime,
                                    nullable=False,
                                    default=datetime.utcnow)
            total_received = self.db.Column(self.db.String,
                                            default='0',
                                            nullable=False)
            last_disbursement_amount = self.db.Column(self.db.String,
                                                      nullable=False,
                                                      default=0)
            last_disbursement_time = self.db.Column(self.db.DateTime,
                                                    nullable=True,
                                                    default=None)
            is_staking = self.db.Column(self.db.Boolean,
                                        nullable=False,
                                        default=False)

            def __repr__(self):
                return f'{self.__class__.__name__}(id={self.id})'

        self.Recipient = Recipient  # Bind to outer class

        # Flask decorators
        rest_app = self.rest_app

        #
        # REST Routes
        #
        @rest_app.route("/status", methods=['GET'])
        def status():
            with ThreadedSession(self.db_engine) as session:
                total_recipients = session.query(self.Recipient).count()
                last_recipient = session.query(self.Recipient).filter(
                    self.Recipient.last_disbursement_time.isnot(
                        None)).order_by('last_disbursement_time').first()

                last_address = last_recipient.address if last_recipient else None
                last_transaction_date = last_recipient.last_disbursement_time.isoformat(
                ) if last_recipient else None

                unfunded = session.query(self.Recipient).filter(
                    self.Recipient.last_disbursement_time.is_(None)).count()

                return json.dumps({
                    "total_recipients": total_recipients,
                    "latest_recipient": last_address,
                    "latest_disburse_date": last_transaction_date,
                    "unfunded_recipients": unfunded,
                    "state": {
                        "eth": str(self.eth_balance),
                        "NU": str(self.token_balance),
                        "address": self.checksum_address,
                        "contract_address": self.token_agent.contract_address,
                    }
                })

        @rest_app.route("/register", methods=['POST'])
        def register():
            """Handle new recipient registration via POST request."""

            new_address = (request.form.get('address')
                           or request.get_json().get('address'))

            if not new_address:
                return Response(response="no address was supplied", status=411)

            if not eth_utils.is_address(new_address):
                return Response(
                    response=
                    "an invalid ethereum address was supplied.  please ensure the address is a proper checksum.",
                    status=400)
            else:
                new_address = eth_utils.to_checksum_address(new_address)

            if new_address in self.reserved_addresses:
                return Response(
                    response=
                    "sorry, that address is reserved and cannot receive funds.",
                    status=403)

            try:
                with ThreadedSession(self.db_engine) as session:

                    existing = Recipient.query.filter_by(
                        address=new_address).all()
                    if len(existing) > self.MAX_INDIVIDUAL_REGISTRATIONS:
                        # Address already exists; Abort
                        self.log.debug(
                            f"{new_address} is already enrolled {self.MAX_INDIVIDUAL_REGISTRATIONS} times."
                        )
                        return Response(
                            response=
                            f"{new_address} requested too many times - please use another address.",
                            status=409)

                    # Create the record
                    recipient = Recipient(address=new_address,
                                          joined=datetime.now())
                    session.add(recipient)
                    session.commit()

            except Exception as e:
                # Pass along exceptions to the logger
                self.log.critical(str(e))
                raise

            else:
                return Response(status=200)  # TODO

        return rest_app

    def create_tables(self) -> None:
        self.make_web_app()
        return self.db.create_all(app=self.rest_app)

    def start(self,
              host: str,
              port: int,
              web_services: bool = True,
              distribution: bool = True,
              crash_on_error: bool = False):

        self.crash_on_error = crash_on_error

        if self.start_time is not NOT_RUNNING:
            raise RuntimeError("Felix is already running.")

        self.start_time = maya.now()
        payload = {"wsgi": self.rest_app, "http_port": port}
        deployer = HendrixDeploy(action="start", options=payload)

        if distribution is True:
            self.start_distribution()

        if web_services is True:
            deployer.run()  # <-- Blocking call (Reactor)

    def start_distribution(self, now: bool = True) -> bool:
        """Start token distribution"""
        self.log.info(NU_BANNER)
        self.log.info("Starting NU Token Distribution | START")
        if self.token_balance == NU.ZERO():
            raise self.ActorError(
                f"Felix address {self.checksum_address} has 0 NU tokens.")
        self._distribution_task.start(interval=self.DISTRIBUTION_INTERVAL,
                                      now=now)
        return True

    def stop_distribution(self) -> bool:
        """Stop token distribution"""
        self.log.info("Stopping NU Token Distribution | STOP")
        self._distribution_task.stop()
        return True

    def __calculate_disbursement(self, recipient) -> int:
        """Calculate the next reward for a recipient once they are selected for distribution."""

        # Initial Reward - sets the future rates
        if recipient.last_disbursement_time is None:
            amount = self.INITIAL_DISBURSEMENT

        # Cap reached; we'll continue to leak the minimum disbursement
        elif int(recipient.total_received) >= self.MAXIMUM_DISBURSEMENT:
            amount = self.MINIMUM_DISBURSEMENT

        # Calculate the next disbursement
        else:
            amount = math.ceil(
                int(recipient.last_disbursement_amount) * self.MULTIPLIER)
            if amount < self.MINIMUM_DISBURSEMENT:
                amount = self.MINIMUM_DISBURSEMENT

        return int(amount)

    def __transfer(self, disbursement: int, recipient_address: str) -> str:
        """Perform a single token transfer transaction from one account to another."""

        # Re-unlock from cache
        self.blockchain.transacting_power.activate()

        self.__disbursement += 1
        receipt = self.token_agent.transfer(
            amount=disbursement,
            target_address=recipient_address,
            sender_address=self.checksum_address)
        txhash = receipt['transactionHash']
        if self.distribute_ether:
            ether = self.ETHER_AIRDROP_AMOUNT
            transaction = {
                'to': recipient_address,
                'from': self.checksum_address,
                'value': ether,
                'gasPrice': self.blockchain.client.gas_price
            }
            ether_txhash = self.blockchain.client.send_transaction(transaction)

            self.log.info(
                f"Disbursement #{self.__disbursement} OK | NU {txhash.hex()[-6:]} | ETH {ether_txhash.hex()[-6:]} "
                f"({str(NU(disbursement, 'NuNit'))} + {self.ETHER_AIRDROP_AMOUNT} wei) -> {recipient_address}"
            )

        else:
            self.log.info(
                f"Disbursement #{self.__disbursement} OK | {txhash.hex()[-6:]} | "
                f"({str(NU(disbursement, 'NuNit'))}) -> {recipient_address}")

        return txhash

    def airdrop_tokens(self):
        """
        Calculate airdrop eligibility via faucet registration
        and transfer tokens to selected recipients.
        """

        with ThreadedSession(self.db_engine) as session:
            population = session.query(self.Recipient).count()

        message = f"{population} registered faucet recipients; " \
                  f"Distributed {str(NU(self.__distributed, 'NuNit'))} since {self.start_time.slang_time()}."
        self.log.debug(message)
        if population == 0:
            return  # Abort - no recipients are registered.

        # For filtration
        since = datetime.now() - timedelta(hours=self.DISBURSEMENT_INTERVAL)

        datetime_filter = or_(self.Recipient.last_disbursement_time <= since,
                              self.Recipient.last_disbursement_time ==
                              None)  # This must be `==` not `is`

        with ThreadedSession(self.db_engine) as session:
            candidates = session.query(
                self.Recipient).filter(datetime_filter).all()
            if not candidates:
                self.log.info("No eligible recipients this round.")
                return

        # Discard invalid addresses (defense in depth)
        invalid_addresses = list()

        def siphon_invalid_entries(candidate):
            address_is_valid = eth_utils.is_checksum_address(candidate.address)
            if not address_is_valid:
                invalid_addresses.append(candidate.address)
            return address_is_valid

        candidates = list(filter(siphon_invalid_entries, candidates))

        if invalid_addresses:
            self.log.info(
                f"{len(invalid_addresses)} invalid entries detected. Pruning database."
            )

            # TODO: Is this needed? - Invalid entries are rejected at the endpoint view.
            # Prune database of invalid records
            # with ThreadedSession(self.db_engine) as session:
            #     bad_eggs = session.query(self.Recipient).filter(self.Recipient.address in invalid_addresses).all()
            #     for egg in bad_eggs:
            #         session.delete(egg.id)
            #     session.commit()

        if not candidates:
            self.log.info("No eligible recipients this round.")
            return

        d = threads.deferToThread(self.__do_airdrop, candidates=candidates)
        self._AIRDROP_QUEUE[self.__airdrop] = d
        return d

    def __do_airdrop(self, candidates: list):

        self.log.info(f"Staging Airdrop #{self.__airdrop}.")

        # Staging
        staged_disbursements = [(r, self.__calculate_disbursement(recipient=r))
                                for r in candidates]
        batches = list(
            staged_disbursements[index:index + self.BATCH_SIZE]
            for index in range(0, len(staged_disbursements), self.BATCH_SIZE))
        total_batches = len(batches)

        self.log.info("====== Staged Airdrop ======")
        for recipient, disbursement in staged_disbursements:
            self.log.info(f"{recipient.address} ... {str(disbursement)[:-18]}")
        self.log.info("==========================")

        # Staging Delay
        self.log.info(
            f"Airdrop will commence in {self.STAGING_DELAY} seconds...")
        if self.STAGING_DELAY > 3:
            time.sleep(self.STAGING_DELAY - 3)
        for i in range(3):
            time.sleep(1)
            self.log.info(f"NU Token airdrop starting in {3 - i} seconds...")

        # Slowly, in series...
        for batch, staged_disbursement in enumerate(batches, start=1):
            self.log.info(f"======= Batch #{batch} ========")

            for recipient, disbursement in staged_disbursement:

                # Perform the transfer... leaky faucet.
                self.__transfer(disbursement=disbursement,
                                recipient_address=recipient.address)
                self.__distributed += disbursement

                # Update the database record
                recipient.last_disbursement_amount = str(disbursement)
                recipient.total_received = str(
                    int(recipient.total_received) + disbursement)
                recipient.last_disbursement_time = datetime.now()

                self.db.session.add(recipient)
                self.db.session.commit()

            # end inner loop
            self.log.info(
                f"Completed Airdrop #{self.__airdrop} Batch #{batch} of {total_batches}."
            )

        # end outer loop
        now = maya.now()
        next_interval_slang = now.add(
            seconds=self.DISTRIBUTION_INTERVAL).slang_time()
        self.log.info(
            f"Completed Airdrop #{self.__airdrop}; Next airdrop is {next_interval_slang}."
        )

        del self._AIRDROP_QUEUE[self.__airdrop]
        self.__airdrop += 1
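
A quick illustration of the disbursement schedule implemented by __calculate_disbursement above: the first payout is INITIAL_DISBURSEMENT, each later payout is the previous one scaled by MULTIPLIER, and MINIMUM_DISBURSEMENT acts as a floor (the cap branch also falls back to the floor). The following is a minimal, self-contained sketch; the INITIAL_DISBURSEMENT value is an assumption standing in for the economics-derived minimum_allowed_locked * 3.

import math
from decimal import Decimal

MULTIPLIER = Decimal('0.9')           # 10% reduction per disbursement
MINIMUM_DISBURSEMENT = int(1e18)      # 1 NU, in NuNits
INITIAL_DISBURSEMENT = 3 * int(1e18)  # assumption: stand-in for the economics-derived value


def next_disbursement(last_amount: int) -> int:
    """Mirror the decay branch of Felix.__calculate_disbursement."""
    amount = math.ceil(last_amount * MULTIPLIER)
    return max(int(amount), MINIMUM_DISBURSEMENT)


amount = INITIAL_DISBURSEMENT
for round_number in range(1, 7):
    print(f"disbursement {round_number}: {amount / 1e18:.4f} NU")
    amount = next_disbursement(amount)

With these numbers the payouts decay geometrically from 3 NU toward the 1 NU floor.
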
Example #18
0
class BaseProtocol(serialBytesProtocol):

    def __init__(self, shorthand=True, callback=None, escaped=False,
                 error_callback=None):

        serialBytesProtocol.__init__(self)
        if callback:
            self.callbacks = [callback]
        else:
            self.callbacks = []
        self.setRawMode()
        self.shorthand = shorthand
        self._escaped = escaped
        self.log = Logger()
        self.requests = {}
        self.command_id = 0
        self.buffer = None
#         self.reading = False

    def get_id(self):
        try:
            self.command_id += 1
            return intToByte(self.command_id)
        except ValueError:
            self.command_id = 1
            return intToByte(1)

    def connect(self, f):
        if f.callback:
            self.callbacks.append(f.callback)
        f.proto = self

    def rawDataReceived(self, data):
        for byte in data:
            if self.buffer:
                self.buffer.fill(byte)
                if self.buffer.remaining_bytes() == 0:
                    try:
                        # Try to parse and return the result
                        self.buffer.parse()
                    except ValueError:
                        # Bad frame, so restart
                        self.log.warn('Bad frame: %r'
                                      % self.buffer.raw_data)
                    else:
                        # Ignore empty frames
                        if len(self.buffer.data) != 0:
                            self.read_frame(self.buffer.data)
                    self.buffer = None
#                     self.reading = False
            else:
                if byte == Frame.START_BYTE:
                    #                     self.reading == True
                    self.buffer = Frame(escaped=self._escaped)

    def read_frame(self, frame):
        """
        read_frame: binary data -> {'id':str,
                                         'param':binary data,
                                         ...}
        read_frame takes a data packet received from an XBee device
        and converts it into a dictionary. This dictionary provides
        names for each segment of binary data as specified in the
        api_responses spec.
        """
        # Fetch the first byte, identify the packet
        # If the spec doesn't exist, raise exception
        packet_id = frame[0:1]
        try:
            name = self.api_responses[packet_id]
        except AttributeError:
            raise NotImplementedError(
                "API response specifications could not be found; " +
                "use a derived class which defines 'api_responses'.")
        except KeyError:
            # Check to see if this ID can be found among transmittible packets
            for cmd_name, cmd in list(self.api_frames.items()):
                if cmd['id']['default'] == packet_id:
                    msg = "Incoming frame with id {packet_id} looks like a " \
                          "command frame of type '{cmd_name}' (these should " \
                          "not be received). Are you sure your devices " \
                          "are in API mode?"
                    self.log.error(
                        msg, packet_id=bytes(frame), cmd_name=cmd_name)
                    return

            self.log.error("Unrecognized response packet with id byte {f}",
                           f=frame[0])
            return

        # Current byte index in the data stream
        packet = self.api_frames[name]
        index = 0
        callback = False

        # Result info
        info = {'id': name}
#         packet_spec = packet['structure']

        # Parse the packet in the order specified

        if 'frame_id' in packet:
            callback = True

#         if packet['len'] == 'null_terminated':
#             field_data = b''
#             while frame[index:index + 1] != b'\x00':
#                 field_data += frame[index:index + 1]
#                 index += 1
#             index += 1
#             info[name]
        for field, dic in packet.items():
            if dic['len'] == 'null_terminated':
                field_data = b''

                while frame[index:index + 1] != b'\x00':
                    field_data += frame[index:index + 1]
                    index += 1

                index += 1
                info[field] = field_data
            elif dic['len'] is not None:
                # Store the number of bytes specified

                # Are we trying to read beyond the last data element?
                if index + dic['len'] > len(frame):
                    raise ValueError(
                        "Response packet was shorter than expected")

                field_data = frame[index:index + dic['len']]
                info[field] = field_data

                index += dic['len']
            # If the data field has no length specified, store any
            #  leftover bytes and quit
            else:
                field_data = frame[index:-1]

                # Were there any remaining bytes?
                if field_data:
                    # If so, store them
                    info[field] = field_data
                    index += len(field_data) + 1
                break

        # If there are more bytes than expected, raise an exception
        if index + 1 < len(frame):
            raise ValueError(
                "Response packet was longer than expected; " +
                "expected: %d, got: %d bytes" % (index, len(frame)))

        # Apply parsing rules if any exist
        if 'parsing' in packet:
            for parse_rule in packet['parsing']:
                # Only apply a rule if it is relevant (raw data is available)
                if parse_rule[0] in info:
                    # Apply the parse function to the indicated field and
                    # replace the raw data with the result
                    info[parse_rule[0]] = parse_rule[1](self, info)
        if callback:
            if info['frame_id'] in self.requests:
                self.requests[info['frame_id']].callback(info)
                del self.requests[info['frame_id']]
            else:
                self.log.warn('Response without request: %r' % info)
        elif self.callbacks:
            for callback in self.callbacks:
                callback(info)
        else:
            self.log.debug(info)

    def _build_command(self, cmd, **kwargs):
        """
        _build_command: string (binary data) ... -> binary data
        _build_command will construct a command packet according to the
        specified command's specification in api_commands. It will expect
        named arguments for all fields other than those with a default
        value or a length of 'None'.
        Each field will be written out in the order they are defined
        in the command definition.
        """
        try:
            cmd_spec = self.api_frames[cmd]
        except AttributeError:
            raise NotImplementedError(
                "API command specifications could not be found; " +
                "use a derived class which defines 'api_commands'.")

        packet = b''

        if 'frame_id' in kwargs:
            fid = kwargs['frame_id']
        elif cmd in ['source_route']:
            fid = b'\x00'
        else:
            fid = self.get_id()
        for name, dic in cmd_spec.items():
            if name == 'frame_id':
                data = fid
            elif name in kwargs:
                data = kwargs[name]
            else:
                if dic['len']:
                    if dic['default']:
                        data = dic['default']
                    else:
                        raise KeyError(
                            "The expected field %s of length %d was "
                            "not provided" % (name, dic['len']))
                else:
                    data = None
            if dic['len'] and len(data) != dic['len']:
                raise ValueError(
                    "The data provided for '%s' was not %d bytes long"
                    % (name, dic['len']))
            if data:
                packet += data

        return packet, fid

    def send(self, cmd, **kwargs):
        """
        send: string param=binary data ... -> None
        When send is called with the proper arguments, an API command
        will be written to the serial port for this XBee device
        containing the proper instructions and data.
        This method must be called with named arguments in accordance
        with the api_command specification. Arguments matching all
        field names other than those in reserved_names (like 'id' and
        'order') should be given, unless they are of variable length
        ('None' in the specification); those are optional.
        """
        # Pass through the keyword arguments
#         if self.reading:
#             return task.deferLater(.5, self.send, cmd, **kwargs)
        packet, fid = self._build_command(cmd, **kwargs)
        d = defer.Deferred()
        self.requests.update({fid: d})
        f = Frame(packet).output()
        self.transport.write(f)
        return d

    def _parse_samples_header(self, io_bytes):
        """
        _parse_samples_header: binary data in XBee IO data format ->
                        (int, [int ...], [int ...], int, int)
        _parse_samples_header will read the first three bytes of the
        binary data given and will return the number of samples which
        follow, a list of enabled digital inputs, a list of enabled
        analog inputs, the dio_mask, and the size of the header in bytes
        """
        header_size = 4

        # number of samples (always 1?) is the first byte
        sample_count = byteToInt(io_bytes[0])

        # part of byte 1 and byte 2 are the DIO mask ( 9 bits )
        dio_mask = (
            byteToInt(io_bytes[1]) << 8 | byteToInt(io_bytes[2])) & 0x01FF

        # upper 7 bits of byte 3 are the AIO mask
        aio_mask = (byteToInt(io_bytes[3]) & 0xFE) >> 1
#         print(byteToInt(io_bytes[3]) & 0xFE >> 1)
#         print(aio_mask)

        # sorted lists of enabled channels; value is position of bit in mask
        dio_chans = []
        aio_chans = []

        for i in range(0, 9):
            if dio_mask & (1 << i):
                dio_chans.append(i)

        dio_chans.sort()

        for i in range(0, 7):
            if aio_mask & (1 << i):
                aio_chans.append(i)

        aio_chans.sort()

        return (sample_count, dio_chans, aio_chans, dio_mask, header_size)

    def _parse_samples(self, io_bytes):
        """
        _parse_samples: binary data in XBee IO data format ->
                        [ {"dio-0":True,
                           "dio-1":False,
                           "adc-0":100"}, ...]
        _parse_samples reads binary data from an XBee device in the IO
        data format specified by the API. It will then return a
        dictionary indicating the status of each enabled IO port.
        """

        sample_count, dio_chans, aio_chans, dio_mask, header_size = \
            self._parse_samples_header(io_bytes)

        samples = []

        # split the sample data into a list, so it can be pop()'d
#         self.log.debug('%r' % io_bytes)
        sample_bytes = [byteToInt(c) for c in io_bytes[header_size:]]
#         self.log.debug('%r' % sample_bytes)
#         self.log.debug('%r' % aio_chans)

        # repeat for every sample provided
        for sample_ind in range(0, sample_count):  # @UnusedVariable
            tmp_samples = {}

            if dio_chans:
                # we have digital data
                digital_data_set = (
                    sample_bytes.pop(0) << 8 | sample_bytes.pop(0))
                digital_values = dio_mask & digital_data_set

                for i in dio_chans:
                    tmp_samples['dio-{0}'.format(i)] = True if (
                        digital_values >> i) & 1 else False

            for i in aio_chans:
                analog_sample = (
                    sample_bytes.pop(0) << 8 | sample_bytes.pop(0))
                tmp_samples['adc-{0}'.format(i)] = int(
                    (analog_sample * 1200.0) / 1023.0)

            samples.append(tmp_samples)

        return samples

    def _parse_sensor_data(self, io_bytes):
        # TODO
        return [{'data': io_bytes}]

    def __getattr__(self, name):
        """
        If a method by the name of a valid api command is called,
        the arguments will be automatically sent to an appropriate
        send() call
        """

        # If api_frames is not defined, raise NotImplementedError.
        # If it is not defined, __getattr__ will be called with its name.
        if name == 'api_frames':
            raise NotImplementedError(
                "API command specifications could not be found; use a " +
                "derived class which defines 'api_commands'.")

        # Is shorthand enabled, and is the called name a command?
        if self.shorthand and name in self.api_frames:
            # If so, simply return a function which passes its arguments
            # to an appropriate send() call
            return lambda **kwargs: self.send(name, **kwargs)
        else:
            raise AttributeError("XBee has no attribute '%s'" % name)
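
The bitmask arithmetic in _parse_samples_header is dense, so here is a standalone sketch of the same header layout, assuming the four-byte format described in its docstring: byte 0 is the sample count, the low bit of byte 1 plus byte 2 form the 9-bit DIO mask, and the upper seven bits of byte 3 form the AIO mask. The function name and example frame bytes are invented for illustration.

def parse_header(io_bytes: bytes):
    """Decode a 4-byte XBee IO sample header into channel lists."""
    sample_count = io_bytes[0]
    dio_mask = ((io_bytes[1] << 8) | io_bytes[2]) & 0x01FF  # 9-bit digital mask
    aio_mask = (io_bytes[3] & 0xFE) >> 1                    # upper 7 bits: analog mask
    dio_chans = [i for i in range(9) if dio_mask & (1 << i)]
    aio_chans = [i for i in range(7) if aio_mask & (1 << i)]
    return sample_count, dio_chans, aio_chans


# One sample; DIO 0 and 3 enabled (mask 0b000001001); ADC 1 enabled.
print(parse_header(bytes([0x01, 0x00, 0x09, 0x04])))  # -> (1, [0, 3], [1])
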
Example #19
0
logger.info('DatabasePort running. Initializing values...')

logger.info("Checking existence of module: mysql.connector")
try:
    import mysql.connector
except ImportError:
    logger.info("Module mysql.connector not found. Trying to install it")

    import pip
    pip.main(['install', 'mysql-connector-python'])

    logger.info("Done. Trying to import again.")
    import mysql.connector

# get values
logger.debug('Enter MySQL username:')
CONFIG['MySQL']['user'] = raw_input()
logger.debug('Enter MySQL Password:')
CONFIG['MySQL']['pass'] = raw_input()
logger.debug(
    'Enter old MySQL database name (the one from which you are going to port):'
)
CONFIG['MySQL']['oldname'] = raw_input()
logger.debug(
    'Enter new MySQL database name (the one to which new data is transferred):'
)
CONFIG['MySQL']['dbname'] = raw_input()

logger.info('Checking DB name conventions and rules')

if not CONFIG['MySQL']['dbname'].endswith('line'):
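
The collection step this snippet performs is straightforward; a sketch of the same prompts in Python 3 follows, with getpass substituted for an echoing raw_input on the password field. The getpass substitution, the CONFIG initialization, and the SystemExit behavior are assumptions; the 'line' suffix rule is taken from the check above.

import getpass

CONFIG = {'MySQL': {}}  # assumption: CONFIG layout mirrors the snippet's usage

CONFIG['MySQL']['user'] = input('Enter MySQL username: ')
CONFIG['MySQL']['pass'] = getpass.getpass('Enter MySQL Password: ')  # avoid echoing secrets
CONFIG['MySQL']['oldname'] = input('Enter old MySQL database name: ')
CONFIG['MySQL']['dbname'] = input('Enter new MySQL database name: ')

if not CONFIG['MySQL']['dbname'].endswith('line'):
    raise SystemExit("New database name must end with 'line'.")
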
Example #20
0
class CharacterConfiguration(BaseConfiguration):
    """
    'Sideways Engagement' of Character classes; a reflection of input parameters.
    """

    CHARACTER_CLASS = NotImplemented
    DEFAULT_CONTROLLER_PORT = NotImplemented
    DEFAULT_PROVIDER_URI = 'http://localhost:8545'
    DEFAULT_DOMAIN = 'goerli'
    DEFAULT_NETWORK_MIDDLEWARE = RestMiddleware
    TEMP_CONFIGURATION_DIR_PREFIX = 'tmp-nucypher'

    def __init__(
            self,

            # Base
            config_root: str = None,
            filepath: str = None,

            # Mode
            dev_mode: bool = False,
            federated_only: bool = False,

            # Identity
            checksum_address: str = None,
            crypto_power: CryptoPower = None,

            # Keyring
            keyring: NucypherKeyring = None,
            keyring_root: str = None,

            # Learner
            learn_on_same_thread: bool = False,
            abort_on_learning_error: bool = False,
            start_learning_now: bool = True,

            # Network
            controller_port: int = None,
            domains: Set[str] = None,
            interface_signature: Signature = None,
            network_middleware: RestMiddleware = None,

            # Node Storage
            known_nodes: set = None,
            node_storage: NodeStorage = None,
            reload_metadata: bool = True,
            save_metadata: bool = True,

            # Blockchain
            poa: bool = False,
            provider_uri: str = None,
            provider_process=None,

            # Registry
            registry_filepath: str = None,
            download_registry: bool = True) -> None:

        self.log = Logger(self.__class__.__name__)

        # Identity
        # NOTE: NodeConfigurations can only be used with Self-Characters
        self.is_me = True
        self.checksum_address = checksum_address

        # Network
        self.controller_port = controller_port or self.DEFAULT_CONTROLLER_PORT
        self.network_middleware = network_middleware or self.DEFAULT_NETWORK_MIDDLEWARE(
        )
        self.interface_signature = interface_signature

        # Keyring
        self.crypto_power = crypto_power
        self.keyring = keyring or NO_KEYRING_ATTACHED
        self.keyring_root = keyring_root or UNINITIALIZED_CONFIGURATION

        # Federated vs. Blockchain arguments compatibility
        blockchain_args = {
            'download_registry': download_registry,
            'registry_filepath': registry_filepath,
            'poa': poa,
            'provider_process': provider_process,
            'provider_uri': provider_uri
        }
        if federated_only and any(blockchain_args.values()):
            bad_args = ", ".join(f"{arg}={val}"
                                 for arg, val in blockchain_args.items() if val)
            # TODO: Warn or raise?
            self.log.warn(
                f"Arguments {bad_args} are incompatible with federated_only. "
                f"Overriding them with sane defaults.")
            poa = False
            provider_uri = None
            provider_process = None
            registry_filepath = None
            download_registry = False

        # Contract Registry
        self.download_registry = download_registry
        self.registry_filepath = registry_filepath or UNINITIALIZED_CONFIGURATION

        # Blockchain
        self.poa = poa
        self.provider_uri = provider_uri or self.DEFAULT_PROVIDER_URI
        self.provider_process = provider_process or NO_BLOCKCHAIN_CONNECTION
        self.blockchain = NO_BLOCKCHAIN_CONNECTION.bool_value(False)
        self.token_agent = NO_BLOCKCHAIN_CONNECTION
        self.staking_agent = NO_BLOCKCHAIN_CONNECTION
        self.policy_agent = NO_BLOCKCHAIN_CONNECTION

        # Learner
        self.federated_only = federated_only
        self.domains = domains or {self.DEFAULT_DOMAIN}
        self.learn_on_same_thread = learn_on_same_thread
        self.abort_on_learning_error = abort_on_learning_error
        self.start_learning_now = start_learning_now
        self.save_metadata = save_metadata
        self.reload_metadata = reload_metadata
        self.__known_nodes = known_nodes or set()  # handpicked
        self.__fleet_state = FleetStateTracker()

        # Configuration
        self.__dev_mode = dev_mode
        self.config_file_location = filepath or UNINITIALIZED_CONFIGURATION
        self.config_root = UNINITIALIZED_CONFIGURATION

        if dev_mode:
            self.__temp_dir = UNINITIALIZED_CONFIGURATION
            self.__setup_node_storage()
            self.initialize(password=DEVELOPMENT_CONFIGURATION)
        else:
            self.__temp_dir = LIVE_CONFIGURATION
            self.config_root = config_root or self.DEFAULT_CONFIG_ROOT
            self._cache_runtime_filepaths()
            self.__setup_node_storage(node_storage=node_storage)

        super().__init__(filepath=self.config_file_location,
                         config_root=self.config_root)

    def __call__(self, **character_kwargs):
        return self.produce(**character_kwargs)

    @classmethod
    def generate(cls, password: str, *args, **kwargs):
        """Shortcut: hook up a new initial installation and write a configuration file to disk."""
        node_config = cls(dev_mode=False, *args, **kwargs)
        node_config.initialize(password=password)
        node_config.to_configuration_file()
        return node_config

    def cleanup(self) -> None:
        if self.__dev_mode:
            self.__temp_dir.cleanup()

    @property
    def dev_mode(self) -> bool:
        return self.__dev_mode

    def get_blockchain_interface(self) -> None:
        if self.federated_only:
            raise CharacterConfiguration.ConfigurationError(
                "Cannot connect to blockchain in federated mode")

        registry = None
        if self.registry_filepath:
            registry = EthereumContractRegistry(
                registry_filepath=self.registry_filepath)

        self.blockchain = BlockchainInterface(
            provider_uri=self.provider_uri,
            poa=self.poa,
            registry=registry,
            provider_process=self.provider_process)

    def acquire_agency(self) -> None:
        self.token_agent = NucypherTokenAgent(blockchain=self.blockchain)
        self.staking_agent = StakingEscrowAgent(blockchain=self.blockchain)
        self.policy_agent = PolicyManagerAgent(blockchain=self.blockchain)
        self.log.debug("Established connection to nucypher contracts")

    @property
    def known_nodes(self) -> FleetStateTracker:
        return self.__fleet_state

    def __setup_node_storage(self, node_storage=None) -> None:
        if self.dev_mode:
            node_storage = ForgetfulNodeStorage(
                blockchain=self.blockchain, federated_only=self.federated_only)
        elif not node_storage:
            node_storage = LocalFileBasedNodeStorage(
                blockchain=self.blockchain,
                federated_only=self.federated_only,
                config_root=self.config_root)
        self.node_storage = node_storage

    def read_known_nodes(self, additional_nodes=None) -> None:
        known_nodes = self.node_storage.all(federated_only=self.federated_only)
        known_nodes = {node.checksum_address: node for node in known_nodes}
        if additional_nodes:
            known_nodes.update(
                {node.checksum_address: node
                 for node in additional_nodes})
        if self.__known_nodes:
            known_nodes.update(
                {node.checksum_address: node
                 for node in self.__known_nodes})
        self.__fleet_state._nodes.update(known_nodes)
        self.__fleet_state.record_fleet_state(
            additional_nodes_to_track=self.__known_nodes)

    def forget_nodes(self) -> None:
        self.node_storage.clear()
        message = "Removed all stored node metadata and certificates"
        self.log.debug(message)

    def destroy(self) -> None:
        """Parse a node configuration and remove all associated files from the filesystem"""
        self.attach_keyring()
        self.keyring.destroy()
        os.remove(self.config_file_location)

    def generate_parameters(self, **overrides) -> dict:
        merged_parameters = {
            **self.static_payload(),
            **self.dynamic_payload,
            **overrides
        }
        non_init_params = ('config_root', 'poa', 'provider_uri')
        character_init_params = filter(lambda t: t[0] not in non_init_params,
                                       merged_parameters.items())
        return dict(character_init_params)

    def produce(self, **overrides) -> CHARACTER_CLASS:
        """Initialize a new character instance and return it."""
        merged_parameters = self.generate_parameters(**overrides)
        character = self.CHARACTER_CLASS(**merged_parameters)
        return character

    @classmethod
    def assemble(cls, filepath: str = None, **overrides) -> dict:

        payload = cls._read_configuration_file(filepath=filepath)
        node_storage = cls.load_node_storage(
            storage_payload=payload['node_storage'],
            federated_only=payload['federated_only'])
        domains = set(payload['domains'])

        # Assemble
        payload.update(dict(node_storage=node_storage, domains=domains))
        # Filter out None values from **overrides to detect, well, overrides...
        # Acts as a shim for optional CLI flags.
        overrides = {k: v for k, v in overrides.items() if v is not None}
        payload = {**payload, **overrides}
        return payload

    @classmethod
    def from_configuration_file(cls,
                                filepath: str = None,
                                provider_process=None,
                                **overrides) -> 'CharacterConfiguration':
        """Initialize a CharacterConfiguration from a JSON file."""
        filepath = filepath or cls.default_filepath()
        assembled_params = cls.assemble(filepath=filepath, **overrides)
        node_configuration = cls(filepath=filepath,
                                 provider_process=provider_process,
                                 **assembled_params)
        return node_configuration

    def validate(self, no_registry: bool = False) -> bool:

        # Top-level
        if not os.path.exists(self.config_root):
            raise self.ConfigurationError(
                f'No configuration directory found at {self.config_root}.')

        # Sub-paths
        filepaths = self.runtime_filepaths
        if no_registry:
            del filepaths['registry_filepath']

        for field, path in filepaths.items():
            if not os.path.exists(path):
                message = 'Missing configuration file or directory: {}.'
                if 'registry' in path:
                    message += ' Did you mean to pass --federated-only?'
                raise CharacterConfiguration.InvalidConfiguration(
                    message.format(path))
        return True

    def static_payload(self) -> dict:
        """Exported static configuration values for initializing Ursula"""

        payload = dict(

            # Identity
            federated_only=self.federated_only,
            checksum_address=self.checksum_address,
            keyring_root=self.keyring_root,

            # Behavior
            domains=list(self.domains),  # From Set
            provider_uri=self.provider_uri,
            learn_on_same_thread=self.learn_on_same_thread,
            abort_on_learning_error=self.abort_on_learning_error,
            start_learning_now=self.start_learning_now,
            save_metadata=self.save_metadata,
            node_storage=self.node_storage.payload(),
        )

        # Optional values (mode)
        if not self.federated_only:
            payload.update(dict(provider_uri=self.provider_uri, poa=self.poa))

        # Merge with base payload
        base_payload = super().static_payload()
        base_payload.update(payload)

        return base_payload

    @property
    def dynamic_payload(self) -> dict:
        """Exported dynamic configuration values for initializing Ursula"""
        payload = dict()
        if not self.federated_only:
            self.get_blockchain_interface()
            self.blockchain.connect()
            payload.update(blockchain=self.blockchain)

        #self.read_known_nodes()   # FIXME: Requires a connected blockchain - #1202
        payload.update(
            dict(network_middleware=self.network_middleware
                 or self.DEFAULT_NETWORK_MIDDLEWARE(),
                 known_nodes=self.known_nodes,
                 node_storage=self.node_storage,
                 crypto_power_ups=self.derive_node_power_ups()))
        return payload

    def generate_filepath(self,
                          filepath: str = None,
                          modifier: str = None,
                          override: bool = False) -> str:
        modifier = modifier or self.checksum_address
        filepath = super().generate_filepath(filepath=filepath,
                                             modifier=modifier,
                                             override=override)
        return filepath

    @property
    def runtime_filepaths(self) -> dict:
        filepaths = dict(config_root=self.config_root,
                         keyring_root=self.keyring_root,
                         registry_filepath=self.registry_filepath)
        return filepaths

    @classmethod
    def generate_runtime_filepaths(cls, config_root: str) -> dict:
        """Dynamically generate paths based on configuration root directory"""
        filepaths = dict(config_root=config_root,
                         config_file_location=os.path.join(
                             config_root, cls.generate_filename()),
                         keyring_root=os.path.join(config_root, 'keyring'),
                         registry_filepath=os.path.join(
                             config_root,
                             EthereumContractRegistry.REGISTRY_NAME))
        return filepaths

    def _cache_runtime_filepaths(self) -> None:
        """Generate runtime filepaths and cache them on the config object"""
        filepaths = self.generate_runtime_filepaths(
            config_root=self.config_root)
        for field, filepath in filepaths.items():
            if getattr(self, field) is UNINITIALIZED_CONFIGURATION:
                setattr(self, field, filepath)

    def attach_keyring(self,
                       checksum_address: str = None,
                       *args,
                       **kwargs) -> None:
        account = checksum_address or self.checksum_address
        if not account:
            raise self.ConfigurationError(
                "No account specified to unlock keyring")
        if self.keyring is not NO_KEYRING_ATTACHED:
            if self.keyring.checksum_address != account:
                raise self.ConfigurationError(
                    "There is already a keyring attached to this configuration."
                )
            return
        self.keyring = NucypherKeyring(keyring_root=self.keyring_root,
                                       account=account,
                                       *args,
                                       **kwargs)

    def derive_node_power_ups(self) -> List[CryptoPowerUp]:
        power_ups = list()
        if self.is_me and not self.dev_mode:
            for power_class in self.CHARACTER_CLASS._default_crypto_powerups:
                power_up = self.keyring.derive_crypto_power(power_class)
                power_ups.append(power_up)
        return power_ups

    def initialize(self, password: str) -> str:
        """Initialize a new configuration and write installation files to disk."""

        # Development
        if self.dev_mode:
            self.__temp_dir = TemporaryDirectory(
                prefix=self.TEMP_CONFIGURATION_DIR_PREFIX)
            self.config_root = self.__temp_dir.name

        # Persistent
        else:
            self._ensure_config_root_exists()
            self.write_keyring(password=password)

        self._cache_runtime_filepaths()
        self.node_storage.initialize()
        init_registry = self.download_registry and not self.federated_only
        if init_registry:
            self.registry_filepath = EthereumContractRegistry.download_latest_publication(
            )

        # Validate
        if not self.__dev_mode:
            self.validate(no_registry=not init_registry)

        # Success
        message = "Created nucypher installation files at {}".format(
            self.config_root)
        self.log.debug(message)
        return self.config_root

    def write_keyring(self,
                      password: str,
                      checksum_address: str = None,
                      **generation_kwargs) -> NucypherKeyring:

        if self.federated_only:
            checksum_address = FEDERATED_ADDRESS

        elif not checksum_address:

            # Note: It is assumed the blockchain interface is not yet connected.
            if self.provider_process:

                # Generate Geth's "datadir"
                if not os.path.exists(self.provider_process.data_dir):
                    os.mkdir(self.provider_process.data_dir)

                # Get or create wallet address
                if not self.checksum_address:
                    self.checksum_address = self.provider_process.ensure_account_exists(
                        password=password)
                elif self.checksum_address not in self.provider_process.accounts(
                ):
                    raise self.ConfigurationError(
                        f'Unknown Account {self.checksum_address}')

            elif not self.checksum_address:
                raise self.ConfigurationError(
                    f'No checksum address provided for decentralized configuration.'
                )

            checksum_address = self.checksum_address

        self.keyring = NucypherKeyring.generate(
            password=password,
            keyring_root=self.keyring_root,
            checksum_address=checksum_address,
            **generation_kwargs)

        if self.federated_only:
            self.checksum_address = self.keyring.checksum_address

        return self.keyring

    @classmethod
    def load_node_storage(cls, storage_payload: dict, federated_only: bool):
        from nucypher.config.storages import NodeStorage
        node_storage_subclasses = {
            storage._name: storage
            for storage in NodeStorage.__subclasses__()
        }
        storage_type = storage_payload[NodeStorage._TYPE_LABEL]
        storage_class = node_storage_subclasses[storage_type]
        node_storage = storage_class.from_payload(
            payload=storage_payload, federated_only=federated_only)
        return node_storage
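
To make the payload plumbing in CharacterConfiguration concrete: generate_parameters merges the static (file-backed) payload, the dynamic (runtime) payload, and caller overrides, with later sources winning, then filters out keys the Character constructor does not accept. A minimal sketch of that merge order; all values below are invented for illustration.

static_payload = {'domains': ['goerli'], 'provider_uri': 'http://localhost:8545',
                  'config_root': '/tmp/nucypher'}
dynamic_payload = {'known_nodes': set(), 'provider_uri': 'http://localhost:8546'}
overrides = {'start_learning_now': False}

merged = {**static_payload, **dynamic_payload, **overrides}  # later sources win
non_init_params = ('config_root', 'poa', 'provider_uri')
init_params = {k: v for k, v in merged.items() if k not in non_init_params}
print(init_params)
# -> {'domains': ['goerli'], 'known_nodes': set(), 'start_learning_now': False}
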
Example #21
0
class Felix(Character, NucypherTokenActor):
    """
    A NuCypher ERC20 faucet / Airdrop scheduler.

    Felix is a web application that gives NuCypher *testnet* tokens to registered addresses
    with a scheduled reduction of disbursement amounts, and an HTTP endpoint
    for handling new address registration.

    The main goal of Felix is to provide a source of testnet tokens for
    research and the development of production-ready nucypher dApps.
    """

    _default_crypto_powerups = [SigningPower]  # identity only

    TEMPLATE_NAME = 'felix.html'

    # Intervals
    DISTRIBUTION_INTERVAL = 60 * 60  # seconds (60*60=1Hr)
    DISBURSEMENT_INTERVAL = 24  # (24) hours
    STAGING_DELAY = 10  # seconds

    # Disbursement
    BATCH_SIZE = 10  # transactions
    MULTIPLIER = 0.95  # a 5% reduction of the previous disbursement
    MINIMUM_DISBURSEMENT = 1e18  # NuNits
    # TRANSACTION_GAS = 40000       # gas  TODO

    # Node Discovery
    LEARNING_TIMEOUT = 30  # seconds
    _SHORT_LEARNING_DELAY = 60  # seconds
    _LONG_LEARNING_DELAY = 120  # seconds
    _ROUNDS_WITHOUT_NODES_AFTER_WHICH_TO_SLOW_DOWN = 1

    # Twisted
    _CLOCK = reactor
    _AIRDROP_QUEUE = dict()

    class NoDatabase(RuntimeError):
        pass

    def __init__(self,
                 db_filepath: str,
                 rest_host: str,
                 rest_port: int,
                 crash_on_error: bool = False,
                 economics: TokenEconomics = None,
                 *args,
                 **kwargs):

        # Character
        super().__init__(*args, **kwargs)
        self.log = Logger(f"felix-{self.checksum_public_address[-6::]}")

        # Network
        self.rest_port = rest_port
        self.rest_host = rest_host
        self.rest_app = NOT_RUNNING
        self.crash_on_error = crash_on_error

        # Database
        self.db_filepath = db_filepath
        self.db = NO_DATABASE_AVAILABLE
        self.db_engine = create_engine(f'sqlite:///{self.db_filepath}',
                                       convert_unicode=True)

        # Blockchain
        self.token_agent = NucypherTokenAgent(blockchain=self.blockchain)
        self.reserved_addresses = [
            self.checksum_public_address, Blockchain.NULL_ADDRESS
        ]

        # Update reserved addresses with deployed contracts
        existing_entries = list(
            self.blockchain.interface.registry.enrolled_addresses)
        self.reserved_addresses.extend(existing_entries)

        # Distribution
        self.__distributed = 0  # Track NU Output
        self.__airdrop = 0  # Track Batch
        self.__disbursement = 0  # Track Quantity
        self._distribution_task = LoopingCall(f=self.airdrop_tokens)
        self._distribution_task.clock = self._CLOCK
        self.start_time = NOT_RUNNING

        if not economics:
            economics = TokenEconomics()
        self.economics = economics

        self.MAXIMUM_DISBURSEMENT = economics.maximum_allowed_locked
        self.INITIAL_DISBURSEMENT = economics.minimum_allowed_locked

        # Banner
        self.log.info(FELIX_BANNER.format(self.checksum_public_address))

    def __repr__(self):
        class_name = self.__class__.__name__
        r = f'{class_name}(checksum_address={self.checksum_public_address}, db_filepath={self.db_filepath})'
        return r

    def make_web_app(self):
        from flask import request
        from flask_sqlalchemy import SQLAlchemy

        # WSGI/Flask Service
        short_name = bytes(self.stamp).hex()[:6]
        self.rest_app = Flask(f"faucet-{short_name}",
                              template_folder=TEMPLATES_DIR)
        self.rest_app.config[
            'SQLALCHEMY_DATABASE_URI'] = f'sqlite:///{self.db_filepath}'
        try:
            self.rest_app.secret_key = sha256(
                os.environ['NUCYPHER_FELIX_DB_SECRET'].encode())  # uses envvar
        except KeyError:
            raise OSError(
                "The 'NUCYPHER_FELIX_DB_SECRET' is not set.  Export your application secret and try again."
            )

        # Database
        self.db = SQLAlchemy(self.rest_app)

        # Database Tables
        class Recipient(self.db.Model):
            """
            The one and only table in Felix's database; Used to track recipients and airdrop metadata.
            """

            __tablename__ = 'recipient'

            id = self.db.Column(self.db.Integer, primary_key=True)
            address = self.db.Column(self.db.String,
                                     unique=True,
                                     nullable=False)
            joined = self.db.Column(self.db.DateTime,
                                    nullable=False,
                                    default=datetime.utcnow)
            total_received = self.db.Column(self.db.String,
                                            default='0',
                                            nullable=False)
            last_disbursement_amount = self.db.Column(self.db.String,
                                                      nullable=False,
                                                      default=0)
            last_disbursement_time = self.db.Column(self.db.DateTime,
                                                    nullable=True,
                                                    default=None)
            is_staking = self.db.Column(self.db.Boolean,
                                        nullable=False,
                                        default=False)

            def __repr__(self):
                return f'{self.__class__.__name__}(id={self.id})'

        self.Recipient = Recipient  # Bind to outer class

        # Flask decorators
        rest_app = self.rest_app
        limiter = Limiter(self.rest_app,
                          key_func=get_remote_address,
                          headers_enabled=True)

        #
        # REST Routes
        #

        @rest_app.route("/", methods=['GET'])
        @limiter.limit("100/day;20/hour;1/minute")
        def home():
            rendering = render_template(self.TEMPLATE_NAME)
            return rendering

        @rest_app.route("/register", methods=['POST'])
        @limiter.limit("5 per day")
        def register():
            """Handle new recipient registration via POST request."""
            try:
                new_address = request.form['address']
            except KeyError:
                return Response(status=400)  # TODO

            if not eth_utils.is_checksum_address(new_address):
                return Response(status=400)  # TODO

            if new_address in self.reserved_addresses:
                return Response(status=400)  # TODO

            try:
                with ThreadedSession(self.db_engine) as session:

                    existing = Recipient.query.filter_by(
                        address=new_address).all()
                    if existing:
                        # Address already exists; Abort
                        self.log.debug(f"{new_address} is already enrolled.")
                        return Response(status=400)

                    # Create the record
                    recipient = Recipient(address=new_address,
                                          joined=datetime.now())
                    session.add(recipient)
                    session.commit()

            except Exception as e:
                # Pass along exceptions to the logger
                self.log.critical(str(e))
                raise

            else:
                return Response(status=200)  # TODO

        return rest_app

    def create_tables(self) -> None:
        self.make_web_app()
        return self.db.create_all(app=self.rest_app)

    def start(self,
              host: str,
              port: int,
              web_services: bool = True,
              distribution: bool = True,
              crash_on_error: bool = False):

        self.crash_on_error = crash_on_error

        if self.start_time is not NOT_RUNNING:
            raise RuntimeError("Felix is already running.")

        self.start_time = maya.now()
        payload = {"wsgi": self.rest_app, "http_port": port}
        deployer = HendrixDeploy(action="start", options=payload)
        click.secho(f"Running {self.__class__.__name__} on {host}:{port}")

        if distribution is True:
            self.start_distribution()

        if web_services is True:
            deployer.run()  # <-- Blocking call (Reactor)

    def start_distribution(self, now: bool = True) -> bool:
        """Start token distribution"""
        self.log.info(NU_BANNER)
        self.log.info("Starting NU Token Distribution | START")
        self._distribution_task.start(interval=self.DISTRIBUTION_INTERVAL,
                                      now=now)
        return True

    def stop_distribution(self) -> bool:
        """Start token distribution"""
        self.log.info("Stopping NU Token Distribution | STOP")
        self._distribution_task.stop()
        return True

    def __calculate_disbursement(self, recipient) -> int:
        """Calculate the next reward for a recipient once the are selected for distribution"""

        # Initial Reward - sets the future rates
        if recipient.last_disbursement_time is None:
            amount = self.INITIAL_DISBURSEMENT

        # Cap reached; we'll continue to leak the minimum disbursement
        elif int(recipient.total_received) >= self.MAXIMUM_DISBURSEMENT:
            amount = self.MINIMUM_DISBURSEMENT

        # Calculate the next disbursement
        else:
            amount = math.ceil(
                int(recipient.last_disbursement_amount) * self.MULTIPLIER)
            if amount < self.MINIMUM_DISBURSEMENT:
                amount = self.MINIMUM_DISBURSEMENT

        return int(amount)

    def __transfer(self, disbursement: int, recipient_address: str) -> str:
        """Perform a single token transfer transaction from one account to another."""

        self.__disbursement += 1
        txhash = self.token_agent.transfer(
            amount=disbursement,
            target_address=recipient_address,
            sender_address=self.checksum_public_address)

        self.log.info(
            f"Disbursement #{self.__disbursement} OK | {txhash.hex()[-6:]} | "
            f"({str(NU(disbursement, 'NuNit'))}) -> {recipient_address}")
        return txhash

    def airdrop_tokens(self):
        """
        Calculate airdrop eligibility via faucet registration
        and transfer tokens to selected recipients.
        """

        with ThreadedSession(self.db_engine) as session:
            population = session.query(self.Recipient).count()

        message = f"{population} registered faucet recipients; " \
                  f"Distributed {str(NU(self.__distributed, 'NuNit'))} since {self.start_time.slang_time()}."
        self.log.debug(message)
        if population == 0:
            return  # Abort - no recipients are registered.

        # For filtration
        since = datetime.now() - timedelta(hours=self.DISBURSEMENT_INTERVAL)

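        # NOTE: SQLAlchemy needs '== None' here to build an 'IS NULL' clause;
        # 'is None' would be evaluated eagerly in Python and break the filter.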
        datetime_filter = or_(self.Recipient.last_disbursement_time <= since,
                              self.Recipient.last_disbursement_time == None)

        with ThreadedSession(self.db_engine) as session:
            candidates = session.query(
                self.Recipient).filter(datetime_filter).all()
            if not candidates:
                self.log.info("No eligible recipients this round.")
                return

        # Discard invalid addresses, in-depth
        invalid_addresses = list()

        def siphon_invalid_entries(candidate):
            address_is_valid = eth_utils.is_checksum_address(candidate.address)
            if not address_is_valid:
                invalid_addresses.append(candidate.address)
            return address_is_valid

        candidates = list(filter(siphon_invalid_entries, candidates))

        if invalid_addresses:
            self.log.info(
                f"{len(invalid_addresses)} invalid entries detected. Pruning database."
            )

            # TODO: Is this needed? - Invalid entries are rejected at the endpoint view.
            # Prune database of invalid records
            # with ThreadedSession(self.db_engine) as session:
            #     bad_eggs = session.query(self.Recipient).filter(self.Recipient.address in invalid_addresses).all()
            #     for egg in bad_eggs:
            #         session.delete(egg.id)
            #     session.commit()

        if not candidates:
            self.log.info("No eligible recipients this round.")
            return

        d = threads.deferToThread(self.__do_airdrop, candidates=candidates)
        self._AIRDROP_QUEUE[self.__airdrop] = d
        return d

    def __do_airdrop(self, candidates: list):

        self.log.info(f"Staging Airdrop #{self.__airdrop}.")

        # Staging
        staged_disbursements = [(r, self.__calculate_disbursement(recipient=r))
                                for r in candidates]
        batches = list(
            staged_disbursements[index:index + self.BATCH_SIZE]
            for index in range(0, len(staged_disbursements), self.BATCH_SIZE))
        total_batches = len(batches)

        self.log.info("====== Staged Airdrop ======")
        for recipient, disbursement in staged_disbursements:
            self.log.info(f"{recipient.address} ... {str(disbursement)[:-18]}")
        self.log.info("==========================")

        # Staging Delay
        self.log.info(
            f"Airdrop will commence in {self.STAGING_DELAY} seconds...")
        if self.STAGING_DELAY > 3:
            time.sleep(self.STAGING_DELAY - 3)
        for i in range(3):
            time.sleep(1)
            self.log.info(f"NU Token airdrop starting in {3 - i} seconds...")

        # Slowly, in series...
        for batch, staged_disbursement in enumerate(batches, start=1):
            self.log.info(f"======= Batch #{batch} ========")

            for recipient, disbursement in staged_disbursement:

                # Perform the transfer... leaky faucet.
                self.__transfer(disbursement=disbursement,
                                recipient_address=recipient.address)
                self.__distributed += disbursement

                # Update the database record
                recipient.last_disbursement_amount = str(disbursement)
                recipient.total_received = str(
                    int(recipient.total_received) + disbursement)
                recipient.last_disbursement_time = datetime.now()

                self.db.session.add(recipient)
                self.db.session.commit()

            # end inner loop
            self.log.info(
                f"Completed Airdrop #{self.__airdrop} Batch #{batch} of {total_batches}."
            )

        # end outer loop
        now = maya.now()
        next_interval_slang = now.add(
            seconds=self.DISTRIBUTION_INTERVAL).slang_time()
        self.log.info(
            f"Completed Airdrop #{self.__airdrop}; Next airdrop is {next_interval_slang}."
        )

        del self._AIRDROP_QUEUE[self.__airdrop]
        self.__airdrop += 1
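The disbursement schedule implemented by __calculate_disbursement above is a geometric series with a floor and a cap: a fixed initial amount seeds the rate, each later round is the previous amount scaled by MULTIPLIER (never below MINIMUM_DISBURSEMENT), and once the running total reaches MAXIMUM_DISBURSEMENT the recipient only receives the minimum. A standalone sketch with illustrative constants (the values below are assumptions, not the project's real parameters):

import math

# Illustrative parameters only -- the real values are class constants on Felix.
INITIAL, MINIMUM, MAXIMUM, MULTIPLIER = 1000, 100, 10000, 0.95


def next_disbursement(last_amount: int, total_received: int, first_time: bool) -> int:
    if first_time:
        return INITIAL                      # initial reward sets the future rates
    if total_received >= MAXIMUM:
        return MINIMUM                      # cap reached: keep leaking the minimum
    return max(math.ceil(last_amount * MULTIPLIER), MINIMUM)


amount, total = next_disbursement(0, 0, True), 0
for round_number in range(1, 6):
    total += amount
    print(round_number, amount, total)
    amount = next_disbursement(amount, total, False)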
Example #22
def ursula(click_config,
           action,
           debug,
           dev,
           quiet,
           dry_run,
           force,
           lonely,
           network,
           teacher_uri,
           min_stake,
           rest_host,
           rest_port,
           db_filepath,
           checksum_address,
           federated_only,
           poa,
           config_root,
           config_file,
           metadata_dir,  # TODO: Start nodes from an additional existing metadata dir
           provider_uri,
           no_registry,
           registry_filepath
           ) -> None:
    """
    Manage and run an Ursula node.

    \b
    Actions
    -------------------------------------------------
    \b
    run            Run an "Ursula" node.
    init           Create a new Ursula node configuration.
    view           View the Ursula node's configuration.
    forget         Forget all known nodes.
    save-metadata  Manually write node metadata to disk without running
    destroy        Delete Ursula node configuration.

    """

    #
    # Boring Setup Stuff
    #
    log = Logger('ursula.cli')  # defined unconditionally: used below even when --quiet is set

    if debug and quiet:
        raise click.BadOptionUsage(option_name="quiet", message="--debug and --quiet cannot be used at the same time.")

    if debug:
        click_config.log_to_sentry = False
        click_config.log_to_file = True
        globalLogPublisher.removeObserver(logToSentry)                          # Sentry
        globalLogPublisher.addObserver(SimpleObserver(log_level_name='debug'))  # Print

    elif quiet:
        globalLogPublisher.removeObserver(logToSentry)
        globalLogPublisher.removeObserver(SimpleObserver)
        globalLogPublisher.removeObserver(getJsonFileObserver())

    #
    # Pre-Launch Warnings
    #
    if not quiet:
        if dev:
            click.secho("WARNING: Running in development mode", fg='yellow')
        if force:
            click.secho("WARNING: Force is enabled", fg='yellow')

    #
    # Unauthenticated Configurations
    #
    if action == "init":
        """Create a brand-new persistent Ursula"""

        if dev and not quiet:
            click.secho("WARNING: Using temporary storage area", fg='yellow')

        if not config_root:                         # Flag
            config_root = click_config.config_file  # Envvar

        if not rest_host:
            rest_host = click.prompt("Enter Ursula's public-facing IPv4 address")

        ursula_config = UrsulaConfiguration.generate(password=click_config.get_password(confirm=True),
                                                     config_root=config_root,
                                                     rest_host=rest_host,
                                                     rest_port=rest_port,
                                                     db_filepath=db_filepath,
                                                     domains={network} if network else None,
                                                     federated_only=federated_only,
                                                     checksum_public_address=checksum_address,
                                                     no_registry=federated_only or no_registry,
                                                     registry_filepath=registry_filepath,
                                                     provider_uri=provider_uri,
                                                     poa=poa)

        if not quiet:
            click.secho("Generated keyring {}".format(ursula_config.keyring_dir), fg='green')
            click.secho("Saved configuration file {}".format(ursula_config.config_file_location), fg='green')

            # Give the user a suggestion as to what to do next...
            how_to_run_message = "\nTo run an Ursula node from the default configuration filepath run: \n\n'{}'\n"
            suggested_command = 'nucypher ursula run'
            if config_root is not None:
                config_file_location = os.path.join(config_root, config_file or UrsulaConfiguration.CONFIG_FILENAME)
                suggested_command += ' --config-file {}'.format(config_file_location)
            click.secho(how_to_run_message.format(suggested_command), fg='green')
            return  # FIN

        else:
            click.secho("OK")

    elif action == "destroy":
        """Delete all configuration files from the disk"""

        if dev:
            message = "'nucypher ursula destroy' cannot be used in --dev mode"
            raise click.BadOptionUsage(option_name='--dev', message=message)

        try:
            ursula_config = UrsulaConfiguration.from_configuration_file(filepath=config_file, domains={network})

        except FileNotFoundError:
            config_root = config_root or DEFAULT_CONFIG_ROOT
            config_file_location = config_file or UrsulaConfiguration.DEFAULT_CONFIG_FILE_LOCATION

            if not force:
                message = "No configuration file found at {}; \n" \
                          "Destroy top-level configuration directory: {}?".format(config_file_location, config_root)
                click.confirm(message, abort=True)  # ABORT

            shutil.rmtree(config_root, ignore_errors=False)

        else:
            if not force:
                click.confirm('''
*Permanently and irreversibly delete all* nucypher files including
    - Private and Public Keys
    - Known Nodes
    - TLS certificates
    - Node Configurations
    - Log Files

Delete {}?'''.format(ursula_config.config_root), abort=True)

            try:
                ursula_config.destroy(force=force)
            except FileNotFoundError:
                message = 'Failed: No nucypher files found at {}'.format(ursula_config.config_root)
                click.secho(message, fg='red')
                log.debug(message)
                raise click.Abort()
            else:
                message = "Deleted configuration files at {}".format(ursula_config.config_root)
                click.secho(message, fg='green')
                log.debug(message)

        if not quiet:
            click.secho("Destroyed {}".format(config_root))

        return

    # Development Configuration
    if dev:
        ursula_config = UrsulaConfiguration(dev_mode=True,
                                            domains={TEMPORARY_DOMAIN},
                                            poa=poa,
                                            registry_filepath=registry_filepath,
                                            provider_uri=provider_uri,
                                            checksum_public_address=checksum_address,
                                            federated_only=federated_only,
                                            rest_host=rest_host,
                                            rest_port=rest_port,
                                            db_filepath=db_filepath)
    # Authenticated Configurations
    else:

        # Deserialize network domain name if override passed
        if network:
            domain_constant = getattr(constants, network.upper())
            domains = {domain_constant}
        else:
            domains = None

        ursula_config = UrsulaConfiguration.from_configuration_file(filepath=config_file,
                                                                    domains=domains,
                                                                    registry_filepath=registry_filepath,
                                                                    provider_uri=provider_uri,
                                                                    rest_host=rest_host,
                                                                    rest_port=rest_port,
                                                                    db_filepath=db_filepath,

                                                                    # TODO: Handle Boolean overrides
                                                                    # poa=poa,
                                                                    # federated_only=federated_only,
                                                                    )

        try:  # Unlock Keyring
            if not quiet:
                click.secho('Decrypting keyring...', fg='blue')
            ursula_config.keyring.unlock(password=click_config.get_password())  # Takes ~3 seconds, ~1GB Ram
        except CryptoError:
            raise ursula_config.keyring.AuthenticationFailed

    if not ursula_config.federated_only:
        try:
            ursula_config.connect_to_blockchain(recompile_contracts=False)
            ursula_config.connect_to_contracts()
        except EthereumContractRegistry.NoRegistry:
            message = "Cannot configure blockchain character: No contract registry found; " \
                      "Did you mean to pass --federated-only?"
            raise EthereumContractRegistry.NoRegistry(message)

    click_config.ursula_config = ursula_config  # Pass Ursula's config onto staking sub-command


    #
    # Launch Warnings
    #
    if not quiet:
        if ursula_config.federated_only:
            click.secho("WARNING: Running in Federated mode", fg='yellow')

    #
    # Action Switch
    #
    if action == 'run':
        """Seed, Produce, Run!"""

        #
        # Seed - Step 1
        #
        teacher_nodes = list()
        if teacher_uri:
            node = Ursula.from_teacher_uri(teacher_uri=teacher_uri,
                                           min_stake=min_stake,
                                           federated_only=ursula_config.federated_only)
            teacher_nodes.append(node)

        #
        # Produce - Step 2
        #
        ursula = ursula_config(known_nodes=teacher_nodes, lonely=lonely)

        # GO!
        try:

            #
            # Run - Step 3
            #
            click.secho("Connecting to {}".format(','.join(str(d) for d in ursula_config.domains)), fg='blue', bold=True)
            click.secho("Running Ursula {} on {}".format(ursula, ursula.rest_interface), fg='green', bold=True)
            if not debug:
                stdio.StandardIO(UrsulaCommandProtocol(ursula=ursula))

            if dry_run:
                # That's all folks!
                return

            ursula.get_deployer().run()  # <--- Blocking Call (Reactor)

        except Exception as e:
            ursula_config.log.critical(str(e))
            click.secho("{} {}".format(e.__class__.__name__, str(e)), fg='red')
            raise  # Crash :-(

        finally:
            if not quiet:
                click.secho("Stopping Ursula")
            ursula_config.cleanup()
            if not quiet:
                click.secho("Ursula Stopped", fg='red')

        return

    elif action == "save-metadata":
        """Manually save a node self-metadata file"""

        ursula = ursula_config.produce(ursula_config=ursula_config)
        metadata_path = ursula.write_node_metadata(node=ursula)
        if not quiet:
            click.secho("Successfully saved node metadata to {}.".format(metadata_path), fg='green')
        return

    elif action == "view":
        """Paint an existing configuration to the console"""
        paint_configuration(config_filepath=config_file or ursula_config.config_file_location)
        return

    elif action == "forget":
        """Forget all known nodes via storages"""
        click.confirm("Permanently delete all known node data?", abort=True)
        ursula_config.forget_nodes()
        message = "Removed all stored node node metadata and certificates"
        click.secho(message=message, fg='red')
        return

    else:
        raise click.BadArgumentUsage("No such argument {}".format(action))
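The command above funnels every sub-action through a single positional argument and dispatches with an if/elif chain, raising click.BadArgumentUsage for anything unrecognized. A minimal sketch of that dispatch pattern (the command and option names here are hypothetical, not part of the nucypher CLI):

import click


@click.command()
@click.argument('action')
@click.option('--quiet', is_flag=True)
def node(action, quiet):
    """Dispatch-on-argument pattern, as in the ursula command above."""
    if action == 'run':
        if not quiet:
            click.secho("Running...", fg='green')
    elif action == 'view':
        click.secho("Viewing configuration...")
    else:
        raise click.BadArgumentUsage("No such argument {}".format(action))


if __name__ == '__main__':
    node()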
Example #23
class Crawler(Learner):
    """
    Obtain Blockchain information for Monitor and output to a DB.
    """

    _SHORT_LEARNING_DELAY = 2
    _LONG_LEARNING_DELAY = 30
    _ROUNDS_WITHOUT_NODES_AFTER_WHICH_TO_SLOW_DOWN = 25

    LEARNING_TIMEOUT = 10
    DEFAULT_REFRESH_RATE = 60  # seconds
    REFRESH_RATE_WINDOW = 0.25

    # InfluxDB Line Protocol Format (note the spaces, commas):
    # +-----------+--------+-+---------+-+---------+
    # |measurement|,tag_set| |field_set| |timestamp|
    # +-----------+--------+-+---------+-+---------+
    NODE_MEASUREMENT = 'crawler_node_info'
    NODE_LINE_PROTOCOL = '{measurement},staker_address={staker_address} ' \
                         'worker_address="{worker_address}",' \
                         'start_date={start_date},' \
                         'end_date={end_date},' \
                         'stake={stake},' \
                         'locked_stake={locked_stake},' \
                         'current_period={current_period}i,' \
                         'last_confirmed_period={last_confirmed_period}i ' \
                         '{timestamp}'

    EVENT_MEASUREMENT = 'crawler_event_info'
    EVENT_LINE_PROTOCOL = '{measurement},txhash={txhash} ' \
                          'contract_name="{contract_name}",' \
                          'contract_address="{contract_address}",' \
                          'event_name="{event_name}",' \
                          'block_number={block_number}i,' \
                          'args="{args}" ' \
                          '{timestamp}'

    INFLUX_DB_NAME = 'network'
    INFLUX_RETENTION_POLICY_NAME = 'network_info_retention'

    # TODO: review defaults for retention policy
    RETENTION = '5w'  # Weeks
    REPLICATION = '1'

    METRICS_ENDPOINT = 'stats'
    DEFAULT_CRAWLER_HTTP_PORT = 9555

    ERROR_EVENTS = {
        StakingEscrowAgent: ['Slashed'],
        AdjudicatorAgent: ['IncorrectCFragVerdict'],
        PolicyManagerAgent: ['NodeBrokenState'],
    }

    STAKER_PAGINATION_SIZE = 200

    def __init__(self,
                 influx_host: str,
                 influx_port: int,
                 crawler_http_port: int = DEFAULT_CRAWLER_HTTP_PORT,
                 registry: BaseContractRegistry = None,
                 node_storage_filepath: str = CrawlerNodeStorage.DEFAULT_DB_FILEPATH,
                 refresh_rate=DEFAULT_REFRESH_RATE,
                 restart_on_error=True,
                 *args,
                 **kwargs):

        # Settings
        self.federated_only = False  # Nope - for compatibility with Learner TODO # nucypher/466
        Teacher.set_federated_mode(False)

        self.registry = registry or InMemoryContractRegistry.from_latest_publication()
        self.economics = EconomicsFactory.get_economics(registry=self.registry)
        self._refresh_rate = refresh_rate
        self._restart_on_error = restart_on_error

        # TODO: Needs cleanup
        # Tracking
        node_storage = CrawlerNodeStorage(
            storage_filepath=node_storage_filepath)

        class MonitoringTracker(FleetSensor):
            def record_fleet_state(self, *args, **kwargs):
                new_state_or_none = super().record_fleet_state(*args, **kwargs)
                if new_state_or_none:
                    _, new_state = new_state_or_none
                    state = self.abridged_state_details(new_state)
                    node_storage.store_state_metadata(state)

        self.tracker_class = MonitoringTracker

        super().__init__(save_metadata=True,
                         node_storage=node_storage,
                         verify_node_bonding=False,
                         *args,
                         **kwargs)

        self.log = Logger(self.__class__.__name__)
        self.log.info(
            f"Storing node metadata in DB: {node_storage.db_filepath}")
        self.log.info(
            f"Storing blockchain metadata in DB: {influx_host}:{influx_port}")

        # In-memory Metrics
        self._stats = {'status': 'initializing'}
        self._crawler_client = None

        # Initialize InfluxDB
        self._db_host = influx_host
        self._db_port = influx_port
        self._influx_client = None

        # Agency
        self.staking_agent = ContractAgency.get_agent(StakingEscrowAgent,
                                                      registry=self.registry)

        # Crawler Tasks
        self.__collection_round = 0
        self.__collecting_nodes = False  # thread tracking
        self.__collecting_stats = False
        self.__events_from_block = 0  # from the beginning
        self.__collecting_events = False

        self._node_details_task = DelayedLoopingCall(
            f=self._learn_about_nodes,
            start_delay=random.randint(2, 15))  # random staggered start
        self._stats_collection_task = DelayedLoopingCall(
            f=self._collect_stats,
            threaded=True,
            start_delay=random.randint(2, 15))  # random staggered start
        self._events_collection_task = DelayedLoopingCall(
            f=self._collect_events,
            start_delay=random.randint(2, 15))  # random staggered start

        # JSON Endpoint
        self._crawler_http_port = crawler_http_port
        self._flask = None

    def _initialize_influx(self):
        try:
            db_list = self._influx_client.get_list_database()
        except requests.exceptions.ConnectionError:
            raise ConnectionError(
                f"No connection to InfluxDB at {self._db_host}:{self._db_port}"
            )
        found_db = (list(
            filter(lambda db: db['name'] == self.INFLUX_DB_NAME, db_list)))
        if len(found_db) == 0:
            # db not previously created
            self.log.info(
                f'Database {self.INFLUX_DB_NAME} not found, creating it')
            self._influx_client.create_database(self.INFLUX_DB_NAME)
            self._influx_client.create_retention_policy(
                name=self.INFLUX_RETENTION_POLICY_NAME,
                duration=self.RETENTION,
                replication=self.REPLICATION,
                database=self.INFLUX_DB_NAME,
                default=True)
        else:
            self.log.info(
                f'Database {self.INFLUX_DB_NAME} already exists, no need to create it'
            )

    def learn_from_teacher_node(self, *args, **kwargs):
        try:
            current_teacher = self.current_teacher_node(cycle=False)
        except self.NotEnoughTeachers as e:
            self.log.warn("Can't learn right now: {}".format(e.args[0]))
            return

        new_nodes = super().learn_from_teacher_node(*args, **kwargs)

        # update metadata of teacher - not just in memory but in the underlying storage system (db in this case)
        self.node_storage.store_node_metadata(current_teacher)
        self.node_storage.store_current_teacher(
            current_teacher.checksum_address)

        return new_nodes

    #
    # Measurements
    #

    @property
    def stats(self) -> dict:
        return self._stats

    @collector(label="Projected Stake and Stakers")
    def _measure_future_locked_tokens(self, periods: int = 365):
        period_range = range(1, periods + 1)
        token_counter = dict()
        for day in period_range:
            tokens, stakers = self.staking_agent.get_all_active_stakers(
                periods=day, pagination_size=self.STAKER_PAGINATION_SIZE)
            token_counter[day] = (float(NU.from_nunits(tokens).to_tokens()),
                                  len(stakers))
        return dict(token_counter)

    @collector(label="Top Stakes")
    def _measure_top_stakers(self) -> dict:
        _, stakers = self.staking_agent.get_all_active_stakers(
            periods=1, pagination_size=self.STAKER_PAGINATION_SIZE)
        data = dict(sorted(stakers.items(), key=lambda s: s[1], reverse=True))
        return data

    @collector(label="Staker Confirmation Status")
    def _measure_staker_activity(self) -> dict:
        confirmed, pending, inactive = self.staking_agent.partition_stakers_by_activity()
        stakers = dict()
        stakers['active'] = len(confirmed)
        stakers['pending'] = len(pending)
        stakers['inactive'] = len(inactive)
        return stakers

    @collector(label="Date/Time of Next Period")
    def _measure_start_of_next_period(self) -> str:
        """Returns iso8601 datetime of next period"""
        current_period = datetime_to_period(
            datetime=maya.now(),
            seconds_per_period=self.economics.seconds_per_period)
        next_period = datetime_at_period(
            period=current_period + 1,
            seconds_per_period=self.economics.seconds_per_period,
            start_of_period=True)

        return next_period.iso8601()

    @collector(label="Known Nodes")
    def measure_known_nodes(self):

        #
        # Setup
        #
        current_period = datetime_to_period(
            datetime=maya.now(),
            seconds_per_period=self.economics.seconds_per_period)
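        # Bucket keys are 'missing confirmations' (current_period - last_confirmed_period):
        # -1 = already confirmed for the next period, 0 = pending confirmation,
        # current_period = never confirmed at all (last confirmed period is 0).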
        buckets = {
            -1: ('green', 'Confirmed'),  # Confirmed Next Period
            0: ('#e0b32d', 'Pending'),  # Pending Confirmation of Next Period
            current_period: ('#525ae3', 'Idle'),  # Never confirmed
            NULL_ADDRESS: ('#d8d9da', 'Headless')  # Headless Staker (No Worker)
        }

        shortest_uptime, newborn = float('inf'), None
        longest_uptime, uptime_king = 0, None

        uptime_template = '{days}d:{hours}h:{minutes}m'

        #
        # Scrape
        #

        payload = defaultdict(list)
        known_nodes = self._crawler_client.get_known_nodes_metadata()
        for staker_address in known_nodes:

            #
            # Confirmation Status Scraping
            #

            last_confirmed_period = self.staking_agent.get_last_committed_period(
                staker_address)
            missing_confirmations = current_period - last_confirmed_period
            worker = self.staking_agent.get_worker_from_staker(staker_address)
            if worker == NULL_ADDRESS:
                # missing_confirmations = NULL_ADDRESS
                continue  # TODO: Skip this DetachedWorker and do not display it
            try:
                color, status_message = buckets[missing_confirmations]
            except KeyError:
                color, status_message = 'red', 'Unconfirmed'
            node_status = {
                'status': status_message,
                'missed_confirmations': missing_confirmations,
                'color': color
            }

            #
            # Uptime Scraping
            #

            now = maya.now()
            timestamp = maya.MayaDT.from_iso8601(
                known_nodes[staker_address]['timestamp'])
            delta = now - timestamp

            node_qualifies_as_newborn = (
                delta.total_seconds() <
                shortest_uptime) and missing_confirmations == -1
            node_qualifies_for_uptime_king = (
                delta.total_seconds() >
                longest_uptime) and missing_confirmations == -1
            if node_qualifies_as_newborn:
                shortest_uptime, newborn = delta.total_seconds(), staker_address
            elif node_qualifies_for_uptime_king:
                longest_uptime, uptime_king = delta.total_seconds(), staker_address

            hours = delta.seconds // 3600
            minutes = delta.seconds % 3600 // 60
            natural_uptime = uptime_template.format(days=delta.days,
                                                    hours=hours,
                                                    minutes=minutes)

            #
            # Aggregate
            #

            known_nodes[staker_address]['status'] = node_status
            known_nodes[staker_address]['uptime'] = natural_uptime
            payload[status_message.lower()].append(known_nodes[staker_address])

        # There are not always winners...
        if newborn:
            known_nodes[newborn]['newborn'] = True
        if uptime_king:
            known_nodes[uptime_king]['uptime_king'] = True
        return payload

    def _collect_stats(self, threaded: bool = True) -> None:
        # TODO: Handle faulty connection to provider (requests.exceptions.ReadTimeout)
        if threaded:
            if self.__collecting_stats:
                self.log.debug(
                    "Skipping Round - Metrics collection thread is already running"
                )
                return
            return reactor.callInThread(self._collect_stats, threaded=False)
        self.__collection_round += 1
        self.__collecting_stats = True

        start = maya.now()
        click.secho(
            f"Scraping Round #{self.__collection_round} ========================",
            fg='blue')
        self.log.info("Collecting Statistics...")

        #
        # Read
        #

        # Time
        block = self.staking_agent.blockchain.client.w3.eth.getBlock('latest')
        block_number = block.number
        block_time = block.timestamp  # epoch
        current_period = datetime_to_period(
            datetime=maya.now(),
            seconds_per_period=self.economics.seconds_per_period)
        click.secho("✓ ... Current Period", color='blue')
        next_period = self._measure_start_of_next_period()

        # Nodes
        teacher = self._crawler_client.get_current_teacher_checksum()
        states = self._crawler_client.get_previous_states_metadata()

        known_nodes = self.measure_known_nodes()

        activity = self._measure_staker_activity()

        # Stake
        #future_locked_tokens = self._measure_future_locked_tokens()
        global_locked_tokens = self.staking_agent.get_global_locked_tokens()
        click.secho("✓ ... Global Network Locked Tokens", color='blue')

        top_stakers = self._measure_top_stakers()

        #
        # Write
        #

        self._stats = {
            'blocknumber': block_number,
            'blocktime': block_time,
            'current_period': current_period,
            'next_period': next_period,
            'prev_states': states,
            'current_teacher': teacher,
            'known_nodes': len(self.known_nodes),
            'activity': activity,
            'node_details': known_nodes,
            'global_locked_tokens': global_locked_tokens,
            #'future_locked_tokens': future_locked_tokens,
            'top_stakers': top_stakers,
        }
        done = maya.now()
        delta = done - start
        self.__collecting_stats = False
        click.secho(
            f"Scraping round completed (duration {delta}).",
            fg='yellow')  # TODO: Make optional, use emitter, or remove
        click.echo("==========================================")
        self.log.debug(f"Collected new metrics took {delta}.")

    @collector(label="Network Event Details")
    def _collect_events(self, threaded: bool = True):
        if threaded:
            if self.__collecting_events:
                self.log.debug(
                    "Skipping Round - Events collection thread is already running"
                )
                return
            return reactor.callInThread(self._collect_events, threaded=False)
        self.__collecting_events = True

        blockchain_client = self.staking_agent.blockchain.client
        latest_block_number = blockchain_client.block_number
        from_block = self.__events_from_block

        #block_time = latest_block.timestamp  # precision in seconds

        current_period = datetime_to_period(
            datetime=maya.now(),
            seconds_per_period=self.economics.seconds_per_period)

        events_list = list()
        for agent_class, event_names in self.ERROR_EVENTS.items():
            agent = ContractAgency.get_agent(agent_class,
                                             registry=self.registry)
            for event_name in event_names:
                events = [agent.contract.events[event_name]]
                for event in events:
                    entries = event.getLogs(fromBlock=from_block,
                                            toBlock=latest_block_number)
                    for event_record in entries:
                        record = EventRecord(event_record)
                        args = ", ".join(f"{k}:{v}"
                                         for k, v in record.args.items())
                        events_list.append(
                            self.EVENT_LINE_PROTOCOL.format(
                                measurement=self.EVENT_MEASUREMENT,
                                txhash=record.transaction_hash,
                                contract_name=agent.contract_name,
                                contract_address=agent.contract_address,
                                event_name=event_name,
                                block_number=record.block_number,
                                args=args,
                                timestamp=blockchain_client.w3.eth.getBlock(
                                    record.block_number).timestamp,
                            ))

        success = self._influx_client.write_points(
            events_list,
            database=self.INFLUX_DB_NAME,
            time_precision='s',
            batch_size=10000,
            protocol='line')
        self.__events_from_block = latest_block_number
        self.__collecting_events = False
        if not success:
            # TODO: What do we do here - Event hook for alerting?
            self.log.warn(
                f'Unable to write events to database {self.INFLUX_DB_NAME} '
                f'| Period {current_period} starting from block {from_block}')

    @collector(label="Known Node Details")
    def _learn_about_nodes(self, threaded: bool = True):
        if threaded:
            if self.__collecting_nodes:
                self.log.debug(
                    "Skipping Round - Nodes collection thread is already running"
                )
                return
            return reactor.callInThread(self._learn_about_nodes,
                                        threaded=False)
        self.__collecting_nodes = True

        agent = self.staking_agent
        known_nodes = list(self.known_nodes)

        block_time = agent.blockchain.client.get_blocktime()  # precision in seconds
        current_period = datetime_to_period(
            datetime=maya.now(),
            seconds_per_period=self.economics.seconds_per_period)

        log = f'Processing {len(known_nodes)} nodes at {MayaDT(epoch=block_time)} | Period {current_period}'
        self.log.info(log)

        data = list()
        for node in known_nodes:

            staker_address = node.checksum_address
            worker = agent.get_worker_from_staker(staker_address)

            stake = agent.owned_tokens(staker_address)
            staked_nu_tokens = float(NU.from_nunits(stake).to_tokens())
            locked_nu_tokens = float(
                NU.from_nunits(
                    agent.get_locked_tokens(
                        staker_address=staker_address)).to_tokens())

            economics = EconomicsFactory.get_economics(registry=self.registry)
            stakes = StakeList(checksum_address=staker_address,
                               registry=self.registry)
            stakes.refresh()

            if stakes.initial_period is NOT_STAKING:
                continue  # TODO: Skip this measurement for now

            start_date = datetime_at_period(
                stakes.initial_period,
                seconds_per_period=economics.seconds_per_period)
            start_date = start_date.datetime().timestamp()
            end_date = datetime_at_period(
                stakes.terminal_period,
                seconds_per_period=economics.seconds_per_period)
            end_date = end_date.datetime().timestamp()

            last_confirmed_period = agent.get_last_committed_period(
                staker_address)

            num_work_orders = 0  # len(node.work_orders())  # TODO: Only works for is_me with datastore attached

            # TODO: do we need to worry about how much information is in memory if number of nodes is
            #  large i.e. should I check for size of data and write within loop if too big
            data.append(
                self.NODE_LINE_PROTOCOL.format(
                    measurement=self.NODE_MEASUREMENT,
                    staker_address=staker_address,
                    worker_address=worker,
                    start_date=start_date,
                    end_date=end_date,
                    stake=staked_nu_tokens,
                    locked_stake=locked_nu_tokens,
                    current_period=current_period,
                    last_confirmed_period=last_confirmed_period,
                    timestamp=block_time,
                    work_orders=num_work_orders))

        success = self._influx_client.write_points(
            data,
            database=self.INFLUX_DB_NAME,
            time_precision='s',
            batch_size=10000,
            protocol='line')
        self.__collecting_nodes = False
        if not success:
            # TODO: What do we do here - Event hook for alerting?
            self.log.warn(
                f'Unable to write node information to database {self.INFLUX_DB_NAME} at '
                f'{MayaDT(epoch=block_time)} | Period {current_period}')

    def make_flask_server(self):
        """JSON Endpoint"""
        flask = Flask('nucypher-monitor')
        self._flask = flask
        self._flask.config["JSONIFY_PRETTYPRINT_REGULAR"] = True

        @flask.route('/stats', methods=['GET'])
        def stats():
            response = jsonify(self._stats)
            return response

    def _handle_errors(self, *args, **kwargs):
        failure = args[0]
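        # Strip braces so the traceback text cannot be mistaken for
        # PEP 3101 format fields by the twisted logger below.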
        cleaned_traceback = failure.getTraceback().replace('{', '').replace(
            '}', '')
        if self._restart_on_error:
            self.log.warn(
                f'Unhandled error: {cleaned_traceback}. Attempting to restart crawler'
            )
            if not self._node_details_task.running:
                self.start()
        else:
            self.log.critical(f'Unhandled error: {cleaned_traceback}')

    def start(self, eager: bool = False):
        """Start the crawler if not already running"""
        if not self.is_running:
            self.log.info('Starting Crawler...')
            if self._influx_client is None:
                self._influx_client = InfluxDBClient(
                    host=self._db_host,
                    port=self._db_port,
                    database=self.INFLUX_DB_NAME)
                self._initialize_influx()

            if self._crawler_client is None:
                from monitor.db import CrawlerStorageClient
                self._crawler_client = CrawlerStorageClient()

                # TODO: Maybe?
                # from monitor.db import CrawlerInfluxClient
                # self.crawler_influx_client = CrawlerInfluxClient()

            # start tasks
            node_learner_deferred = self._node_details_task.start(
                interval=random.randint(
                    int(self._refresh_rate * (1 - self.REFRESH_RATE_WINDOW)),
                    self._refresh_rate),
                now=eager)
            collection_deferred = self._stats_collection_task.start(
                interval=random.randint(
                    self._refresh_rate,
                    int(self._refresh_rate * (1 + self.REFRESH_RATE_WINDOW))),
                now=eager)

            # get known last event block
            self.__events_from_block = self._get_last_known_blocknumber()
            events_deferred = self._events_collection_task.start(
                interval=self._refresh_rate, now=eager)

            # hookup error callbacks
            node_learner_deferred.addErrback(self._handle_errors)
            collection_deferred.addErrback(self._handle_errors)
            events_deferred.addErrback(self._handle_errors)

            # Start up
            self.start_learning_loop(now=False)
            self.make_flask_server()
            hx_deployer = HendrixDeploy(action="start",
                                        options={
                                            "wsgi": self._flask,
                                            "http_port":
                                            self._crawler_http_port
                                        })
            hx_deployer.run()  # <--- Blocking Call to Reactor

    def stop(self):
        """Stop the crawler if currently running"""
        if self.is_running:
            self.log.info('Stopping Monitor Crawler')

            # stop tasks
            self._node_details_task.stop()
            self._events_collection_task.stop()
            self._stats_collection_task.stop()

            if self._influx_client is not None:
                self._influx_client.close()
                self._influx_client = None

    @property
    def is_running(self):
        """Returns True if currently running, False otherwise"""
        return self._node_details_task.running

    def _get_last_known_blocknumber(self):
        last_known_blocknumber = 0
        blocknumber_result = list(
            self._influx_client.query(
                f'SELECT MAX(block_number) from {self.EVENT_MEASUREMENT}').
            get_points())
        if len(blocknumber_result) > 0:
            last_known_blocknumber = blocknumber_result[0]['max']

        return last_known_blocknumber
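The two *_LINE_PROTOCOL templates above render InfluxDB line-protocol strings: tags sit next to the measurement name (unquoted), fields follow after a space (an 'i' suffix marks integers), and the epoch timestamp comes last. A minimal sketch of producing and writing one such point (host, port and field values are placeholders):

from influxdb import InfluxDBClient

NODE_LINE = ('{measurement},staker_address={staker_address} '
             'stake={stake},current_period={current_period}i {timestamp}')

line = NODE_LINE.format(measurement='crawler_node_info',
                        staker_address='0x0000000000000000000000000000000000000001',  # tag
                        stake=15000.0,          # float field
                        current_period=120,     # integer field ('i' suffix)
                        timestamp=1609459200)   # epoch seconds

client = InfluxDBClient(host='localhost', port=8086, database='network')
client.write_points([line], database='network',
                    time_precision='s', protocol='line')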
Example #24
class WebController(CharacterController):
    """
    A wrapper around a JSON control interface that
    handles web requests to exert control over a character.
    """

    _emitter_class = WebEmitter
    _crash_on_error_default = False

    _captured_status_codes = {
        200: 'OK',
        400: 'BAD REQUEST',
        500: 'INTERNAL SERVER ERROR'
    }

    def __init__(self,
                 app_name: str,
                 character_controller: CharacterController,
                 start_learning: bool = True,
                 crash_on_error: bool = _crash_on_error_default):

        self.app_name = app_name

        # Configuration
        self.start_learning = start_learning
        self.crash_on_error = crash_on_error

        # Control Cycle Handler
        self.emitter = self._emitter_class()

        # Internals
        self._web_app = NO_WEB_APP_ATTACHED
        self._captured_status_codes = NO_WEB_APP_ATTACHED

        # Hard-wire the character's output flow to the WebEmitter
        self._internal_controller = character_controller
        self._internal_controller.emitter = self.emitter

        super().__init__(
            control_serializer=self._internal_controller.serializer)

        self.log = Logger(app_name)

    def make_web_controller(self):

        # Serialize For WSGI <-> Bytes <-> Unicode <-> JSON <-> Hex/B64 <-> Native Requests
        self._internal_controller.serialize = True
        self._web_app = Flask(self.app_name)

        # Return FlaskApp decorator
        return self._web_app

    def start(self, http_port: int, dry_run: bool = False):

        self.log.info("Starting HTTP Character Control...")

        if dry_run:
            return

        # TODO #845: Make non-blocking web control startup
        hx_deployer = HendrixDeploy(action="start",
                                    options={
                                        "wsgi": self._web_app,
                                        "http_port": http_port
                                    })
        hx_deployer.run()  # <--- Blocking Call to Reactor

    def __call__(self, *args, **kwargs):
        return self.__handle_request(*args, **kwargs)

    def __handle_request(self, interface, control_request, *args,
                         **kwargs) -> Response:

        interface_name = interface.__name__

        _400_exceptions = (CharacterSpecification.MissingField,
                           CharacterSpecification.InvalidInputField,
                           CharacterControlSerializer.SerializerError)
        try:
            response = interface(request=control_request.data, *args,
                                 **kwargs)  # < ------- INLET

        #
        # Client Errors
        #
        except _400_exceptions as e:
            __exception_code = 400
            return self.emitter(e=e,
                                log_level='debug',
                                response_code=__exception_code,
                                error_message=WebController.
                                _captured_status_codes[__exception_code])

        #
        # Server Errors
        #
        except CharacterSpecification.SpecificationError as e:
            __exception_code = 500
            return self.emitter(e=e,
                                log_level='critical',
                                response_code=__exception_code,
                                error_message=WebController.
                                _captured_status_codes[__exception_code])

        #
        # Unhandled Server Errors
        #
        except Exception as e:
            __exception_code = 500
            if self.crash_on_error:
                raise
            return self.emitter(e=e,
                                log_level='debug',
                                response_code=__exception_code,
                                error_message=WebController.
                                _captured_status_codes[__exception_code])

        #
        # Send to Emitter
        #
        else:
            self.log.debug(f"{interface_name} [200 - OK]"
                           )  # TODO - include interface name in metadata
            return response
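WebController.__handle_request above centralizes the mapping from exception classes to HTTP status codes around a single "inlet" call. The same idea can be expressed as a standalone decorator; the exception classes below are stand-ins for the specification errors, not nucypher's real ones:

from functools import wraps

from flask import Response


class MissingField(Exception):
    """Stand-in for a client-side specification error."""


class SpecificationError(Exception):
    """Stand-in for a server-side specification error."""


def map_errors(interface):
    """Translate interface exceptions into HTTP status codes."""
    @wraps(interface)
    def wrapper(*args, **kwargs):
        try:
            return interface(*args, **kwargs)  # <-- INLET
        except MissingField:
            return Response(status=400)        # client error
        except SpecificationError:
            return Response(status=500)        # server error
    return wrapper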
Example #25
def test_collect_rewards_integration(click_runner,
                                     testerchain,
                                     agency_local_registry,
                                     stakeholder_configuration_file_location,
                                     blockchain_alice,
                                     blockchain_bob,
                                     random_policy_label,
                                     manual_staker,
                                     manual_worker,
                                     token_economics,
                                     mock_transacting_power_activation,
                                     policy_value,
                                     policy_rate):

    half_stake_time = token_economics.minimum_locked_periods // 2  # Test setup
    logger = Logger("Test-CLI")  # Enter the Teacher's Logger, and
    current_period = 0  # State the initial period for incrementing

    staker_address = manual_staker
    worker_address = manual_worker

    staker = Staker(is_me=True, checksum_address=staker_address, registry=agency_local_registry)
    staker.stakes.refresh()

    # The staker is staking.
    assert staker.is_staking
    assert staker.stakes
    assert staker.worker_address == worker_address

    ursula_port = select_test_port()
    ursula = Ursula(is_me=True,
                    checksum_address=staker_address,
                    worker_address=worker_address,
                    registry=agency_local_registry,
                    rest_host='127.0.0.1',
                    rest_port=ursula_port,
                    network_middleware=MockRestMiddleware())

    MOCK_KNOWN_URSULAS_CACHE[ursula_port] = ursula
    assert ursula.worker_address == worker_address
    assert ursula.checksum_address == staker_address

    mock_transacting_power_activation(account=worker_address, password=INSECURE_DEVELOPMENT_PASSWORD)

    # Confirm for half the first stake duration
    for _ in range(half_stake_time):
        logger.debug(f">>>>>>>>>>> TEST PERIOD {current_period} <<<<<<<<<<<<<<<<")
        ursula.confirm_activity()
        testerchain.time_travel(periods=1)
        current_period += 1

    # Alice creates a policy and grants Bob access
    blockchain_alice.selection_buffer = 1

    M, N = 1, 1
    days = 3
    now = testerchain.w3.eth.getBlock(block_identifier='latest').timestamp
    expiration = maya.MayaDT(now).add(days=days-1)
    blockchain_policy = blockchain_alice.grant(bob=blockchain_bob,
                                               label=random_policy_label,
                                               m=M, n=N,
                                               value=policy_value,
                                               expiration=expiration,
                                               handpicked_ursulas={ursula})

    # Ensure that the handpicked Ursula was selected for the policy
    arrangement = list(blockchain_policy._accepted_arrangements)[0]
    assert arrangement.ursula == ursula

    # Bob learns about the new staker and joins the policy
    blockchain_bob.start_learning_loop()
    blockchain_bob.remember_node(node=ursula)
    blockchain_bob.join_policy(random_policy_label, bytes(blockchain_alice.stamp))

    # Enrico Encrypts (of course)
    enrico = Enrico(policy_encrypting_key=blockchain_policy.public_key,
                    network_middleware=MockRestMiddleware())

    verifying_key = blockchain_alice.stamp.as_umbral_pubkey()

    for index in range(half_stake_time - 5):
        logger.debug(f">>>>>>>>>>> TEST PERIOD {current_period} <<<<<<<<<<<<<<<<")
        ursula.confirm_activity()

        # Encrypt
        random_data = os.urandom(random.randrange(20, 100))
        ciphertext, signature = enrico.encrypt_message(message=random_data)

        # Decrypt
        cleartexts = blockchain_bob.retrieve(ciphertext,
                                             enrico=enrico,
                                             alice_verifying_key=verifying_key,
                                             label=random_policy_label)
        assert random_data == cleartexts[0]

        # Ursula Staying online and the clock advancing
        testerchain.time_travel(periods=1)
        current_period += 1

    # Finish the passage of time for the first Stake
    for _ in range(5):  # plus the extended periods from stake division
        logger.debug(f">>>>>>>>>>> TEST PERIOD {current_period} <<<<<<<<<<<<<<<<")
        ursula.confirm_activity()
        testerchain.time_travel(periods=1)
        current_period += 1

    #
    # WHERE'S THE MONEY, URSULA?? - Collecting Rewards
    #

    # The address the client wants Ursula to send rewards to
    burner_wallet = testerchain.w3.eth.account.create(INSECURE_DEVELOPMENT_PASSWORD)

    # The rewards wallet is initially empty, because it is freshly created
    assert testerchain.client.get_balance(burner_wallet.address) == 0

    # Rewards will be unlocked after the
    # final confirmed period has passed (+1).
    logger.debug(f">>>>>>>>>>> TEST PERIOD {current_period} <<<<<<<<<<<<<<<<")
    testerchain.time_travel(periods=1)
    current_period += 1
    logger.debug(f">>>>>>>>>>> TEST PERIOD {current_period} <<<<<<<<<<<<<<<<")

    # At least half of the tokens are unlocked (restaking was enabled for some prior periods)
    assert staker.locked_tokens() >= token_economics.minimum_allowed_locked

    # Since we are mocking the blockchain connection, manually consume the transacting power of the Staker.
    mock_transacting_power_activation(account=staker_address, password=INSECURE_DEVELOPMENT_PASSWORD)

    # Collect Policy Reward
    collection_args = ('stake', 'collect-reward',
                       '--config-file', stakeholder_configuration_file_location,
                       '--policy-reward',
                       '--no-staking-reward',
                       '--staking-address', staker_address,
                       '--withdraw-address', burner_wallet.address)
    result = click_runner.invoke(nucypher_cli,
                                 collection_args,
                                 input=INSECURE_DEVELOPMENT_PASSWORD,
                                 catch_exceptions=False)
    assert result.exit_code == 0

    # Policy Reward
    collected_policy_reward = testerchain.client.get_balance(burner_wallet.address)
    expected_collection = policy_rate * 30
    assert collected_policy_reward == expected_collection

    # Finish the passage of time... once and for all
    # Extended periods from stake division
    for _ in range(9):
        ursula.confirm_activity()
        current_period += 1
        logger.debug(f">>>>>>>>>>> TEST PERIOD {current_period} <<<<<<<<<<<<<<<<")
        testerchain.time_travel(periods=1)

    #
    # Collect Staking Reward
    #

    balance_before_collecting = staker.token_agent.get_balance(address=staker_address)

    collection_args = ('stake', 'collect-reward',
                       '--config-file', stakeholder_configuration_file_location,
                       '--no-policy-reward',
                       '--staking-reward',
                       '--staking-address', staker_address,
                       '--force')

    result = click_runner.invoke(nucypher_cli,
                                 collection_args,
                                 input=INSECURE_DEVELOPMENT_PASSWORD,
                                 catch_exceptions=False)
    assert result.exit_code == 0

    # The staker has withdrawn her staking rewards
    assert staker.token_agent.get_balance(address=staker_address) >= balance_before_collecting
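A detail worth noting in the test above: the reward-collection assertions hinge on a freshly created "burner" account whose balance must start at zero. A minimal web3.py sketch of that setup (assuming the eth-tester in-memory provider; the entropy string is arbitrary):

from web3 import Web3

w3 = Web3(Web3.EthereumTesterProvider())            # in-memory test chain
burner = w3.eth.account.create('insecure-test-entropy')
assert w3.eth.getBalance(burner.address) == 0       # freshly created, never funded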
Example #26
def setup_structured_logging(
    hs,
    config,
    log_config: dict,
    logBeginner: LogBeginner,
    redirect_stdlib_logging: bool = True,
) -> LogPublisher:
    """
    Set up Twisted's structured logging system.

    Args:
        hs: The homeserver to use.
        config (HomeserverConfig): The configuration of the Synapse homeserver.
        log_config (dict): The log configuration to use.
        logBeginner (LogBeginner): The Twisted LogBeginner to register the
            assembled observers with.
        redirect_stdlib_logging (bool): Whether to redirect stdlib logging
            into the Twisted observer chain.
    """
    if config.no_redirect_stdio:
        raise ConfigError(
            "no_redirect_stdio cannot be defined using structured logging.")

    logger = Logger()

    if "drains" not in log_config:
        raise ConfigError(
            "The logging configuration requires a list of drains.")

    observers = []  # type: List[ILogObserver]

    for observer in parse_drain_configs(log_config["drains"]):
        # Pipe drains
        if observer.type == DrainType.CONSOLE:
            logger.debug("Starting up the {name} console logger drain",
                         name=observer.name)
            observers.append(SynapseFileLogObserver(observer.location))
        elif observer.type == DrainType.CONSOLE_JSON:
            logger.debug("Starting up the {name} JSON console logger drain",
                         name=observer.name)
            observers.append(jsonFileLogObserver(observer.location))
        elif observer.type == DrainType.CONSOLE_JSON_TERSE:
            logger.debug(
                "Starting up the {name} terse JSON console logger drain",
                name=observer.name,
            )
            observers.append(
                TerseJSONToConsoleLogObserver(observer.location, metadata={}))

        # File drains
        elif observer.type == DrainType.FILE:
            logger.debug("Starting up the {name} file logger drain",
                         name=observer.name)
            log_file = open(observer.location,
                            "at",
                            buffering=1,
                            encoding="utf8")
            observers.append(SynapseFileLogObserver(log_file))
        elif observer.type == DrainType.FILE_JSON:
            logger.debug("Starting up the {name} JSON file logger drain",
                         name=observer.name)
            log_file = open(observer.location,
                            "at",
                            buffering=1,
                            encoding="utf8")
            observers.append(jsonFileLogObserver(log_file))

        elif observer.type == DrainType.NETWORK_JSON_TERSE:
            metadata = {"server_name": hs.config.server_name}
            log_observer = TerseJSONToTCPLogObserver(
                hs=hs,
                host=observer.location[0],
                port=observer.location[1],
                metadata=metadata,
                maximum_buffer=observer.options.maximum_buffer,
            )
            log_observer.start()
            observers.append(log_observer)
        else:
            # We should never get here, but, just in case, throw an error.
            raise ConfigError("%s drain type cannot be configured" %
                              (observer.type, ))

    publisher = LogPublisher(*observers)
    log_filter = LogLevelFilterPredicate()

    for namespace, namespace_config in log_config.get("loggers",
                                                      DEFAULT_LOGGERS).items():
        # Set the log level for twisted.logger.Logger namespaces
        log_filter.setLogLevelForNamespace(
            namespace,
            stdlib_log_level_to_twisted(namespace_config.get("level", "INFO")),
        )

        # Also set the log levels for the stdlib logger namespaces, to prevent
        # them getting to PythonStdlibToTwistedLogger and having to be formatted
        if "level" in namespace_config:
            logging.getLogger(namespace).setLevel(
                namespace_config.get("level"))

    f = FilteringLogObserver(publisher, [log_filter])
    lco = LogContextObserver(f)

    if redirect_stdlib_logging:
        stuff_into_twisted = PythonStdlibToTwistedLogger(lco)
        stdliblogger = logging.getLogger()
        stdliblogger.addHandler(stuff_into_twisted)

    # Always redirect standard I/O, otherwise other logging outputs might miss
    # it.
    logBeginner.beginLoggingTo([lco], redirectStandardIO=True)

    return publisher
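
For orientation, a sketch of the shape of log_config that the reads above imply (log_config["drains"] and log_config.get("loggers")); drain names and options here are illustrative, and the authoritative schema lives in parse_drain_configs:

log_config = {
    "drains": {
        # A console drain writing to stdout (DrainType.CONSOLE).
        "console": {"type": "console", "location": "stdout"},
    },
    "loggers": {
        # Quieten a chatty namespace.
        "synapse.storage": {"level": "WARNING"},
    },
}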
Example #27
class BlockchainInterface:
    """
    Interacts with a solidity compiler and a registry in order to instantiate compiled
    ethereum contracts with the given web3 provider backend.
    """

    TIMEOUT = 600  # seconds
    NULL_ADDRESS = '0x' + '0' * 40

    DEFAULT_GAS_STRATEGY = 'medium'
    GAS_STRATEGIES = {
        'glacial': time_based.glacial_gas_price_strategy,  # 24h
        'slow': time_based.slow_gas_price_strategy,  # 1h
        'medium': time_based.medium_gas_price_strategy,  # 5m
        'fast': time_based.fast_gas_price_strategy  # 60s
    }

    process = NO_PROVIDER_PROCESS.bool_value(False)
    Web3 = Web3

    _contract_factory = VersionedContract

    class InterfaceError(Exception):
        pass

    class NoProvider(InterfaceError):
        pass

    class UnsupportedProvider(InterfaceError):
        pass

    class ConnectionFailed(InterfaceError):
        pass

    class UnknownContract(InterfaceError):
        pass

    class NotEnoughConfirmations(InterfaceError):
        pass

    def __init__(
            self,
            emitter=None,  # TODO # 1754
            poa: bool = False,
            light: bool = False,
            provider_process: NuCypherGethProcess = NO_PROVIDER_PROCESS,
            provider_uri: str = NO_BLOCKCHAIN_CONNECTION,
            provider: Web3Providers = NO_BLOCKCHAIN_CONNECTION,
            gas_strategy: Union[str, Callable] = DEFAULT_GAS_STRATEGY):
        """
        A blockchain "network interface"; the circumflex wraps entirely around the bounds of
        contract operations including compilation, deployment, and execution.

        TODO: #1502 - Move me to docs.

         Filesystem          Configuration           Node              Client                  EVM
        ================ ====================== =============== =====================  ===========================

         Solidity Files -- SolidityCompiler -                      --- HTTPProvider ------ ...
                                            |                    |
                                            |                    |
                                            |                    |
                                            - *BlockchainInterface* -- IPCProvider ----- External EVM (geth, parity...)
                                                       |         |
                                                       |         |
                                                 TestProvider ----- EthereumTester -------------
                                                                                                |
                                                                                                |
                                                                                        PyEVM (Development Chain)

         ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

         Runtime Files --                 --BlockchainInterface ----> Registry
                        |                |             ^
                        |                |             |
                        |                |             |
         Key Files ------ CharacterConfiguration     Agent                          ... (Contract API)
                        |                |             ^
                        |                |             |
                        |                |             |
                        |                |           Actor                          ... (Blockchain-Character API)
                        |                |             ^
                        |                |             |
                        |                |             |
         Config File ---                  --------- Character                       ... (Public API)
                                                       ^
                                                       |
                                                     Human


        The Blockchain is the junction of the solidity compiler, a contract registry, and a collection of
        web3 network providers as a means of interfacing with the ethereum blockchain to execute
        or deploy contract code on the network.


        Compiler and Registry Usage
        -----------------------------

        Contracts are freshly re-compiled if an instance of SolidityCompiler is passed; otherwise,
        the registry will read contract data saved to disk, which is used to retrieve contract addresses and op-codes.
        Optionally, a registry instance can be passed instead.


        Provider Usage
        ---------------
        https://github.com/ethereum/eth-tester#available-backends


        * HTTP Provider - Web3 HTTP provider, typically JSON RPC 2.0 over HTTP
        * Websocket Provider - Web3 WS provider, typically JSON RPC 2.0 over WS, supply endpoint uri and websocket=True
        * IPC Provider - Web3 File based IPC provider transported over standard I/O
        * Custom Provider - A pre-initialized web3.py provider instance to attach to this interface

        """

        self.log = Logger('Blockchain')
        self.poa = poa
        self.provider_uri = provider_uri
        self._provider = provider
        self._provider_process = provider_process
        self.w3 = NO_BLOCKCHAIN_CONNECTION
        self.client = NO_BLOCKCHAIN_CONNECTION  # type: Web3Client
        self.transacting_power = READ_ONLY_INTERFACE
        self.is_light = light

        try:
            gas_strategy = self.GAS_STRATEGIES[gas_strategy]
        except KeyError:
            if gas_strategy and not callable(gas_strategy):
                raise ValueError(
                    f"{gas_strategy} must be callable to be a valid gas strategy."
                )
            else:
                gas_strategy = self.GAS_STRATEGIES[self.DEFAULT_GAS_STRATEGY]
        self.gas_strategy = gas_strategy

    def __repr__(self):
        r = '{name}({uri})'.format(name=self.__class__.__name__,
                                   uri=self.provider_uri)
        return r

    @classmethod
    def from_dict(cls, payload: dict, **overrides) -> 'BlockchainInterface':
        payload.update({k: v for k, v in overrides.items() if v is not None})
        blockchain = cls(**payload)
        return blockchain

    def to_dict(self) -> dict:
        payload = dict(provider_uri=self.provider_uri,
                       poa=self.poa,
                       light=self.is_light)
        return payload

    @property
    def is_connected(self) -> bool:
        """
        https://web3py.readthedocs.io/en/stable/providers.html#examples-using-automated-detection
        """
        if self.client is NO_BLOCKCHAIN_CONNECTION:
            return False
        return self.client.is_connected

    def attach_middleware(self):

        # For use with Proof-Of-Authority test-blockchains
        if self.poa is True:
            self.log.debug('Injecting POA middleware at layer 0')
            self.client.inject_middleware(geth_poa_middleware, layer=0)

        # Gas Price Strategy
        # TODO: Do we need to use all of these at once, or perhaps choose one?
        self.client.w3.eth.setGasPriceStrategy(self.gas_strategy)
        self.client.w3.middleware_onion.add(
            middleware.time_based_cache_middleware)
        self.client.w3.middleware_onion.add(
            middleware.latest_block_based_cache_middleware)
        self.client.w3.middleware_onion.add(middleware.simple_cache_middleware)

    def connect(self):

        # Spawn child process
        if self._provider_process:
            self._provider_process.start()
            provider_uri = self._provider_process.provider_uri(scheme='file')
        else:
            provider_uri = self.provider_uri
            self.log.info(
                f"Using external Web3 Provider '{self.provider_uri}'")

        # Attach Provider
        self._attach_provider(provider=self._provider,
                              provider_uri=provider_uri)
        self.log.info("Connecting to {}".format(self.provider_uri))
        if self._provider is NO_BLOCKCHAIN_CONNECTION:
            raise self.NoProvider(
                "There are no configured blockchain providers")

        # Connect if not connected
        try:
            self.w3 = self.Web3(provider=self._provider)
            self.client = Web3Client.from_w3(w3=self.w3)
        except requests.ConnectionError:  # RPC
            raise self.ConnectionFailed(
                f'Connection Failed - {str(self.provider_uri)} - is RPC enabled?'
            )
        except FileNotFoundError:  # IPC File Protocol
            raise self.ConnectionFailed(
                f'Connection Failed - {str(self.provider_uri)} - is IPC enabled?'
            )
        else:
            self.attach_middleware()

        return self.is_connected

    def sync(self, emitter=None) -> None:

        sync_state = self.client.sync()
        if emitter is not None:

            emitter.echo(
                f"Syncing: {self.client.chain_name.capitalize()}. Waiting for sync to begin.",
                verbosity=1)

            while not len(self.client.peers):
                emitter.echo("waiting for peers...", verbosity=1)
                time.sleep(5)

            peer_count = len(self.client.peers)
            emitter.echo(
                f"Found {'an' if peer_count == 1 else peer_count} Ethereum peer{('s' if peer_count > 1 else '')}.",
                verbosity=1)

            try:
                emitter.echo("Beginning sync...", verbosity=1)
                initial_state = next(sync_state)
            except StopIteration:  # will occur if no syncing needs to happen
                emitter.echo("Local blockchain data is already synced.",
                             verbosity=1)
                return

            prior_state = initial_state
            total_blocks_to_sync = int(initial_state.get(
                'highestBlock', 0)) - int(initial_state.get('currentBlock', 0))
            with click.progressbar(
                    length=total_blocks_to_sync,
                    label="sync progress",
                    file=emitter.get_stream(verbosity=1)) as bar:
                for syncdata in sync_state:
                    if syncdata:
                        blocks_accomplished = int(
                            syncdata['currentBlock']) - int(
                                prior_state.get('currentBlock', 0))
                        bar.update(blocks_accomplished)
                        prior_state = syncdata
        else:
            try:
                for syncdata in sync_state:
                    self.client.log.info(
                        f"Syncing {syncdata['currentBlock']}/{syncdata['highestBlock']}"
                    )
            except TypeError:  # it's already synced
                return
        return

    @property
    def provider(self) -> Union[IPCProvider, WebsocketProvider, HTTPProvider]:
        return self._provider

    def _attach_provider(self,
                         provider: Web3Providers = None,
                         provider_uri: str = None) -> None:
        """
        https://web3py.readthedocs.io/en/latest/providers.html#providers
        """

        if not provider_uri and not provider:
            raise self.NoProvider("No URI or provider instances supplied.")

        if provider_uri and not provider:
            uri_breakdown = urlparse(provider_uri)

            if uri_breakdown.scheme == 'tester':
                providers = {
                    'pyevm': _get_tester_pyevm,
                    'geth': _get_test_geth_parity_provider,
                    'parity-ethereum': _get_test_geth_parity_provider,
                }
                provider_scheme = uri_breakdown.netloc

            else:
                providers = {
                    'auto': _get_auto_provider,
                    'infura': _get_infura_provider,
                    'ipc': _get_IPC_provider,
                    'file': _get_IPC_provider,
                    'ws': _get_websocket_provider,
                    'http': _get_HTTP_provider,
                    'https': _get_HTTP_provider,
                }
                provider_scheme = uri_breakdown.scheme

            # auto-detect for file based ipc
            if not provider_scheme:
                if os.path.exists(provider_uri):
                    # file is available - assume ipc/file scheme
                    provider_scheme = 'file'
                    self.log.info(
                        f"Auto-detected provider scheme as 'file://' for provider {provider_uri}"
                    )

            try:
                self._provider = providers[provider_scheme](provider_uri)
            except KeyError:
                raise self.UnsupportedProvider(
                    f"{provider_uri} is an invalid or unsupported blockchain provider URI"
                )
            else:
                self.provider_uri = provider_uri or NO_BLOCKCHAIN_CONNECTION
        else:
            self._provider = provider

    @validate_checksum_address
    def build_transaction(
        self,
        contract_function: ContractFunction,
        sender_address: str,
        payload: dict = None,
        transaction_gas_limit: int = None,
    ) -> dict:

        #
        # Build
        #

        if not payload:
            payload = {}

        nonce = self.client.w3.eth.getTransactionCount(sender_address,
                                                       'pending')
        payload.update({
            'chainId': int(self.client.chain_id),
            'nonce': nonce,
            'from': sender_address,
            'gasPrice': self.client.gas_price
        })

        if transaction_gas_limit:
            payload['gas'] = int(transaction_gas_limit)

        # Get transaction type
        deployment = isinstance(contract_function, ContractConstructor)
        try:
            transaction_name = contract_function.fn_name.upper()
        except AttributeError:
            transaction_name = 'DEPLOY' if deployment else 'UNKNOWN'

        payload_pprint = dict(payload)
        payload_pprint['from'] = to_checksum_address(payload['from'])
        payload_pprint.update({
            f: prettify_eth_amount(v)
            for f, v in payload.items() if f in ('gasPrice', 'value')
        })
        payload_pprint = ', '.join("{}: {}".format(k, v)
                                   for k, v in payload_pprint.items())
        self.log.debug(f"[TX-{transaction_name}] | {payload_pprint}")

        # Build transaction payload
        try:
            unsigned_transaction = contract_function.buildTransaction(payload)
        except (ValidationError, ValueError) as e:
            # TODO: #1504 - Handle validation failures for gas limits, invalid fields, etc.
            # Note: Geth raises ValueError in the same condition that pyevm raises ValidationError here.
            # Treat this condition as "Transaction Failed".
            error = str(e).replace("{", "").replace("}", "")  # See #724
            self.log.critical(f"Validation error: {error}")
            raise
        else:
            if deployment:
                self.log.info(
                    f"Deploying contract: {len(unsigned_transaction['data'])} bytes"
                )

        return unsigned_transaction

    def sign_and_broadcast_transaction(self,
                                       unsigned_transaction,
                                       transaction_name: str = "",
                                       confirmations: int = 0) -> dict:

        #
        # Setup
        #

        # TODO # 1754
        # TODO: Move this to singleton - I do not approve... nor does Bogdan?
        if GlobalLoggerSettings._json_ipc:
            emitter = JSONRPCStdoutEmitter()
        else:
            emitter = StdoutEmitter()

        if self.transacting_power is READ_ONLY_INTERFACE:
            raise self.InterfaceError(str(READ_ONLY_INTERFACE))

        #
        # Sign
        #

        # TODO: Show the USD Price
        # Price Oracle
        # https://api.coinmarketcap.com/v1/ticker/ethereum/
        price = unsigned_transaction['gasPrice']
        cost_wei = price * unsigned_transaction['gas']
        cost = Web3.fromWei(cost_wei, 'gwei')

        if self.transacting_power.device:
            emitter.message(
                f'Confirm transaction {transaction_name} on hardware wallet... ({cost} gwei @ {price})',
                color='yellow')
        signed_raw_transaction = self.transacting_power.sign_transaction(
            unsigned_transaction)

        #
        # Broadcast
        #

        emitter.message(
            f'Broadcasting {transaction_name} Transaction ({cost} gwei @ {price})...',
            color='yellow')
        txhash = self.client.send_raw_transaction(signed_raw_transaction)
        try:
            receipt = self.client.wait_for_receipt(txhash,
                                                   timeout=self.TIMEOUT)
        except TimeExhausted:
            # TODO: #1504 - Handle transaction timeout
            raise
        else:
            self.log.debug(
                f"[RECEIPT-{transaction_name}] | txhash: {receipt['transactionHash'].hex()}"
            )

        #
        # Confirm
        #

        # Primary check
        deployment_status = receipt.get('status', UNKNOWN_TX_STATUS)
        if deployment_status == 0:
            failure = f"Transaction transmitted, but receipt returned status code 0. " \
                      f"Full receipt: \n {pprint.pformat(receipt, indent=2)}"
            raise self.InterfaceError(failure)

        if deployment_status is UNKNOWN_TX_STATUS:
            self.log.info(
                f"Unknown transaction status for {txhash} (receipt did not contain a status field)"
            )

            # Secondary check
            tx = self.client.get_transaction(txhash)
            if tx["gas"] == receipt["gasUsed"]:
                raise self.InterfaceError(
                    f"Transaction consumed 100% of transaction gas."
                    f"Full receipt: \n {pprint.pformat(receipt, indent=2)}")

        # Block confirmations
        if confirmations:
            start = maya.now()
            confirmations_so_far = self.get_confirmations(receipt)
            while confirmations_so_far < confirmations:
                self.log.info(
                    f"So far, we've only got {confirmations_so_far} confirmations. "
                    f"Waiting for {confirmations - confirmations_so_far} more."
                )
                time.sleep(3)
                confirmations_so_far = self.get_confirmations(receipt)
                if (maya.now() - start).seconds > self.TIMEOUT:
                    raise self.NotEnoughConfirmations

        return receipt

    def get_confirmations(self, receipt: dict) -> int:
        tx_block_number = receipt.get('blockNumber')
        latest_block_number = self.w3.eth.blockNumber
        confirmations = latest_block_number - tx_block_number
        if confirmations < 0:
            raise ValueError(
                f"Can't get number of confirmations for transaction {receipt['transactionHash'].hex()}, "
                f"as it seems to come from {-confirmations} blocks in the future..."
            )
        return confirmations

    @validate_checksum_address
    def send_transaction(self,
                         contract_function: Union[ContractFunction,
                                                  ContractConstructor],
                         sender_address: str,
                         payload: dict = None,
                         transaction_gas_limit: int = None,
                         confirmations: int = 0) -> dict:

        transaction = self.build_transaction(
            contract_function=contract_function,
            sender_address=sender_address,
            payload=payload,
            transaction_gas_limit=transaction_gas_limit)

        try:
            transaction_name = contract_function.fn_name.upper()
        except AttributeError:
            transaction_name = 'DEPLOY' if isinstance(
                contract_function, ContractConstructor) else 'UNKNOWN'

        receipt = self.sign_and_broadcast_transaction(
            unsigned_transaction=transaction,
            transaction_name=transaction_name,
            confirmations=confirmations)
        return receipt

    def get_contract_by_name(
        self,
        registry: BaseContractRegistry,
        contract_name: str,
        contract_version: str = None,
        enrollment_version: Union[int, str] = None,
        proxy_name: str = None,
        use_proxy_address: bool = True
    ) -> Union[VersionedContract, List[tuple]]:
        """
        Instantiate a deployed contract from registry data,
        and assimilate it with its proxy if it is upgradeable,
        or return all registered records if use_proxy_address is False.
        """
        target_contract_records = registry.search(
            contract_name=contract_name, contract_version=contract_version)

        if not target_contract_records:
            raise self.UnknownContract(
                f"No such contract records with name {contract_name}:{contract_version}."
            )

        if proxy_name:

            # Lookup proxies; Search for a published proxy that targets this contract record
            proxy_records = registry.search(contract_name=proxy_name)

            results = list()
            for proxy_name, proxy_version, proxy_address, proxy_abi in proxy_records:
                proxy_contract = self.client.w3.eth.contract(
                    abi=proxy_abi,
                    address=proxy_address,
                    version=proxy_version,
                    ContractFactoryClass=self._contract_factory)

                # Read this dispatcher's target address from the blockchain
                proxy_live_target_address = proxy_contract.functions.target(
                ).call()
                for target_name, target_version, target_address, target_abi in target_contract_records:

                    if target_address == proxy_live_target_address:
                        if use_proxy_address:
                            triplet = (proxy_address, target_version,
                                       target_abi)
                        else:
                            triplet = (target_address, target_version,
                                       target_abi)
                    else:
                        continue

                    results.append(triplet)

            if len(results) > 1:
                address, _version, _abi = results[0]
                message = "Multiple {} deployments are targeting {}".format(
                    proxy_name, address)
                raise self.InterfaceError(message)

            else:
                try:
                    selected_address, selected_version, selected_abi = results[
                        0]
                except IndexError:
                    raise self.UnknownContract(
                        f"There are no Dispatcher records targeting '{contract_name}':{contract_version}"
                    )

        else:
            # TODO: use_proxy_address doesn't work in this case. Should we raise if used?

            # NOTE: 0 must be allowed as a valid version number
            if len(target_contract_records) != 1:
                if enrollment_version is None:
                    m = f"{len(target_contract_records)} records enrolled " \
                        f"for contract {contract_name}:{contract_version} " \
                        f"and no version index was supplied."
                    raise self.InterfaceError(m)
                enrollment_version = self.__get_enrollment_version_index(
                    name=contract_name,
                    contract_version=contract_version,
                    version_index=enrollment_version,
                    enrollments=len(target_contract_records))

            else:
                enrollment_version = -1  # default

            _contract_name, selected_version, selected_address, selected_abi = target_contract_records[
                enrollment_version]

        # Create the contract from selected sources
        unified_contract = self.client.w3.eth.contract(
            abi=selected_abi,
            address=selected_address,
            version=selected_version,
            ContractFactoryClass=self._contract_factory)

        return unified_contract

    @staticmethod
    def __get_enrollment_version_index(version_index: Union[int, str],
                                       enrollments: int, name: str,
                                       contract_version: str):
        version_names = {'latest': -1, 'earliest': 0}
        try:
            version = version_names[version_index]
        except KeyError:
            try:
                version = int(version_index)
            except ValueError:
                what_is_this = version_index
                raise ValueError(
                    f"'{what_is_this}' is not a valid enrollment version number"
                )
            else:
                if version > enrollments - 1:
                    message = f"Version index '{version}' is larger than the number of enrollments " \
                              f"for {name}:{contract_version}."
                    raise ValueError(message)
        return version
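
A hedged usage sketch for the class above, assuming a local JSON-RPC node at a hypothetical URI; connect() resolves a provider from the URI scheme and attaches the configured middleware:

interface = BlockchainInterface(provider_uri='http://localhost:8545',
                                gas_strategy='fast')
interface.connect()           # attaches provider, client, and middleware
assert interface.is_connected
print(interface)              # BlockchainInterface(http://localhost:8545)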
Example #28
def main():
    """
    Run the server.
    """

    parser = argparse.ArgumentParser(
        description='Resolve DNS queries from Database')
    parser.add_argument('-c', '--config',
        dest='config_file',
        type=str, action='store',
        default='./config.yml',
        help='Path to the configuration file'
    )
    parser.add_argument('--port', '-p',
        dest='port',
        type=int, action='store',
        default=10053,
        help='Port number for the service'
    )
    parser.add_argument('--dry-run', '-d',
        dest='dry_run',
        action='store_true',
        help='Dry run, just check the config file'
    )
    #parser.add_argument('--verbose', '-v',
    #    dest='verbose',
    #    action='store_true',
    #    help='Be verbose'
    #)
    params = parser.parse_args()

    # Log to stdout, as this is intended to run in docker
    log.startLogging(sys.stdout)
    # Make the new logging style compatible with the traditional one
    def observer(event, log=log):
        log.msg(event['log_format'].format(**event))
        if 'log_failure' in event:
            log.err(event['log_failure'])
    logger = Logger(namespace='default', observer=observer)

    # Read config file
    config = Config(params.config_file, logger)
    logger.debug("Running with the following parameters:\n{data}", data=config)

    # Dry run
    if params.dry_run:
        sys.exit(0)
    
    # Build a connection lasting the lifetime of the service
    connection = adbapi.ConnectionPool(
        config.db_driver,
        host=config.db_host,
        port=config.db_port,
        user=config.db_user,
        passwd=config.db_passwd,
        db=config.db_name,
        cp_reconnect=True
    )

    # Build a global Resolver lasting the lifetime of the service
    resolver = client.createResolver()
    customResolver = DynamicResolver(config, connection, resolver, logger)

    # Factory and protocol services
    factory  = server.DNSServerFactory(
        caches=[
            cache.CacheResolver(),
        ],
        # Use "clients" instead of "authorities", so caching works
        clients=[
            hosts.Resolver(file=config.dns_hosts, ttl=config.dns_ttl),
            customResolver,
        ]
    )
    protocol = dns.DNSDatagramProtocol(controller=factory)

    # Start polling loop, to avoid timeouts
    poller = LoopingCall(customResolver.poll)
    poller.start(config.poll_time)

    # Listen on TCP and UDP
    reactor.listenUDP(params.port, protocol)
    reactor.listenTCP(params.port, factory)
    reactor.run()
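
Once the service is running, it can be exercised from another process with twisted.names' stock client; a small sketch against the default port above (the queried name is illustrative):

from twisted.internet import reactor
from twisted.names import client

# Point a resolver at the locally running DNS service.
resolver = client.Resolver(servers=[('127.0.0.1', 10053)])

d = resolver.lookupAddress('example.com')
d.addCallback(print)                      # (answers, authority, additional)
d.addBoth(lambda _: reactor.stop())
reactor.run()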
Example #29
class PhotometerService(Service):

    BUFFER_SIZE = 1

    def __init__(self, options, label):

        self.options   = options
        self.label     = label
        self.namespace = self.label.upper()
        setLogLevel(namespace=self.namespace,  levelStr=options['log_messages'])
        setLogLevel(namespace=self.label,      levelStr=options['log_level'])
        self.log       = Logger(namespace=self.label)
        self.factory   = self.buildFactory()
        self.protocol  = None
        self.serport   = None
        self.buffer    = CircularBuffer(self.BUFFER_SIZE, self.log)
        self.counter   = 0
        # Handling of Asynchronous getInfo()
        self.info = None
        self.info_deferred = None
        if options['old_firmware']:
            self.info = {
                'name'  : self.options['name'],
                'mac'   : self.options['mac_address'],
                'calib' : self.options['zp'],
                'rev'   : 2,
                }
        
        # Serial port Handling
        parts = chop(self.options['endpoint'], sep=':')
        if parts[0] != 'serial':
            self.log.critical("Incorrect endpoint type {ep}, should be 'serial'", ep=parts[0])
            raise NotImplementedError
          
    
    def startService(self):
        '''
        Starts the photometer service and listens to a TESS.
        Although it is technically a synchronous operation, it works well
        with inline callbacks
        '''
        self.log.info("starting {name}", name=self.name)
        self.connect()
       


    def stopService(self):
        self.log.warn("stopping {name}", name=self.name)
        self.protocol.transport.loseConnection()
        self.protocol = None
        self.serport  = None
        #self.parent.childStopped(self)
        return defer.succeed(None)

    #---------------------
    # Extended Service API
    # --------------------

    @inlineCallbacks
    def reloadService(self, new_options):
        '''
        Reload configuration.
        Returns a Deferred
        '''
        options = new_options[self.label]
        setLogLevel(namespace=self.label,     levelStr=options['log_level'])
        setLogLevel(namespace=self.namespace, levelStr=options['log_messages'])
        self.options = options
        return defer.succeed(None)
      
    # -----------------------
    # Specific photometer API
    # -----------------------

    def handleInfo(self, reading):
        if self.info_deferred is not None:
            self.info = {
                'name'  : reading.get('name', None),
                'calib' : reading.get('ZP', None),
                'mac'   : self.options['mac_address'],
                'rev'   : 2,
            }
            self.log.info("Photometer Info: {info}", info=self.info)
            self.info_deferred.callback(self.info)
            self.info_deferred = None


    def curate(self, reading):
        '''Readings ready for MQTT Tx according to our wire protocol'''
        reading['seq'] = self.counter
        self.counter += 1
        self.last_tstamp = reading.pop('tstamp', None)
        if self.options['old_firmware']:
            reading['mag']  = round(self.options['zp'] - 2.5*math.log10(reading['freq']),2)
            reading['rev']  = 2
            reading['name'] = self.options['name']
            reading['alt']  = 0.0
            reading['azi']  = 0.0
            reading['wdBm'] = 0
            reading.pop('zp', None)
        else:
            reading['mag']  = round(reading['ZP'] - 2.5*math.log10(reading['freq']),2)
            self.info = {
                'name'  : reading.get('name', None),
                'calib' : reading.get('ZP', None),
                'mac'   : self.options['mac_address'],
                'rev'   : 2,
            }
            reading.pop('udp', None)
            reading.pop('ain', None)
            reading.pop('ZP',  None)
        return reading

    
    def getInfo(self):
        '''Asynchronous operations'''
        if not self.options['old_firmware'] and self.info is None:
            deferred = defer.Deferred()
            deferred.addTimeout(60, reactor)
            self.info_deferred = deferred
        else:
            self.log.info("Photometer Info: {info}", info=self.info)
            deferred = defer.succeed(self.info)
        return deferred

    # --------------
    # Helper methods
    # ---------------

    def connect(self):
        parts = chop(self.options['endpoint'], sep=':')
        endpoint = parts[1:]
        self.protocol = self.factory.buildProtocol(0)
        try:
            self.serport  = SerialPort(self.protocol, endpoint[0], reactor, baudrate=endpoint[1])
        except Exception as e:
            self.log.error("{excp}",excp=e)
            self.protocol = None
        else:
            self.gotProtocol(self.protocol)
            self.log.info("Using serial port {tty} @ {baud} bps", tty=endpoint[0], baud=endpoint[1])
    
    
    def buildFactory(self):
        self.log.debug("Choosing a {model} factory", model=TESSW)
        import tessw.tessw
        factory = tessw.tessw.TESSProtocolFactory(self.namespace, self.options['old_firmware'])
        return factory


    def gotProtocol(self, protocol):
        self.log.debug("got protocol")
        self.buffer.registerProducer(protocol, True)
        self.protocol  = protocol
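
curate() derives the reported magnitude from the raw frequency with the usual zero-point relation, mag = ZP - 2.5 * log10(freq). A standalone numeric sketch (values are illustrative):

import math

zero_point = 20.44      # instrument zero point (illustrative)
frequency  = 3.5        # raw TESS frequency in Hz (illustrative)

magnitude = round(zero_point - 2.5 * math.log10(frequency), 2)
print(magnitude)        # 19.08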
Example #30
class PostgresListenerService(Service):
    """Listens for NOTIFY messages from postgres.

    A new connection is made to postgres with the isolation level of
    autocommit. This connection is only used for listening for notifications.
    Any query that needs to take place because of a notification should use
    its own connection. This class runs inside of the reactor. Any long running
    action that occurs based on a notification should defer its work to a
    thread so as not to block the reactor.

    :ivar connection: A database connection within one of Django's wrappers.
    :ivar connectionFileno: The fileno of the underlying database connection.
    :ivar connecting: a :class:`Deferred` while connecting, `None` at all
        other times.
    :ivar disconnecting: a :class:`Deferred` while disconnecting, `None`
        at all other times.
    """

    # Seconds to wait to handle new notifications. When the notifications set
    # is empty it will wait this amount of time to check again for new
    # notifications.
    HANDLE_NOTIFY_DELAY = 0.5
    CHANNEL_REGISTRAR_DELAY = 0.5

    def __init__(self, alias="default"):
        self.alias = alias
        self.listeners = defaultdict(list)
        self.autoReconnect = False
        self.connection = None
        self.connectionFileno = None
        self.notifications = set()
        self.notifier = task.LoopingCall(self.handleNotifies)
        self.notifierDone = None
        self.connecting = None
        self.disconnecting = None
        self.registeredChannels = set()
        self.channelRegistrar = task.LoopingCall(
            lambda: ensureDeferred(self.registerChannels()))
        self.channelRegistrarDone = None
        self.log = Logger(__name__, self)
        self.events = EventGroup("connected", "disconnected")
        # the connection object isn't threadsafe, so we need to lock in order
        # to use it in different threads
        self._db_lock = threading.RLock()

    def startService(self):
        """Start the listener."""
        super().startService()
        self.autoReconnect = True
        return self.tryConnection()

    def stopService(self):
        """Stop the listener."""
        super().stopService()
        self.autoReconnect = False
        return self.loseConnection()

    def connected(self):
        """Return True if connected."""
        if self.connection is None:
            return False
        if self.connection.connection is None:
            return False
        return self.connection.connection.closed == 0

    def logPrefix(self):
        """Return nice name for twisted logging.

        This is required to satisfy `IReadDescriptor`, which inherits from
        `ILoggingContext`.
        """
        return self.log.namespace

    def isSystemChannel(self, channel):
        """Return True if channel is a system channel."""
        return channel.startswith("sys_")

    def doRead(self):
        """Poll the connection and process any notifications."""
        with self._db_lock:
            try:
                self.connection.connection.poll()
            except Exception:
                # If the connection goes down then `OperationalError` is raised.
                # It contains no pgcode or pgerror to identify the reason so no
                # special consideration can be made for it. Hence all errors are
                # treated the same, and we assume that the connection is broken.
                #
                # We do NOT return a failure, which would signal to the reactor
                # that the connection is broken in some way, because the reactor
                # will end up removing this instance from its list of selectables
                # but not from its list of readable fds, or something like that.
                # The point is that the reactor's accounting gets muddled. Things
                # work correctly if we manage the disconnection ourselves.
                #
                self.loseConnection(Failure(error.ConnectionLost()))
            else:
                self._process_notifies()

    def fileno(self):
        """Return the fileno of the connection."""
        return self.connectionFileno

    def startReading(self):
        """Add this listener to the reactor."""
        self.connectionFileno = self.connection.connection.fileno()
        reactor.addReader(self)

    def stopReading(self):
        """Remove this listener from the reactor."""
        try:
            reactor.removeReader(self)
        except IOError as error:
            # ENOENT here means that the fd has already been unregistered
            # from the underlying poller. It is as yet unclear how we get
            # into this state, so for now we ignore it. See epoll_ctl(2).
            if error.errno != ENOENT:
                raise
        finally:
            self.connectionFileno = None

    def register(self, channel, handler):
        """Register listening for notifications from a channel.

        When a notification is received for that `channel` the `handler` will
        be called with the action and object id.
        """
        self.log.debug(f"Register on {channel} with handler {handler}")
        handlers = self.listeners[channel]
        if self.isSystemChannel(channel) and len(handlers) > 0:
            # A system channel can only be registered once. This is because the
            # message is passed directly to the handler, and the `doRead`
            # method does not wait for it to finish if it's a deferred. This is
            # different from normal handlers where we will call each and wait
            # for all to resolve before continuing to the next event.
            raise PostgresListenerRegistrationError(
                "System channel '%s' has already been registered." % channel)
        else:
            handlers.append(handler)
        self.runChannelRegistrar()

    def unregister(self, channel, handler):
        """Unregister listening for notifications from a channel.

        `handler` needs to be same handler that was registered.
        """
        self.log.debug(f"Unregister on {channel} with handler {handler}")
        if channel not in self.listeners:
            raise PostgresListenerUnregistrationError(
                "Channel '%s' is not registered with the listener." % channel)
        handlers = self.listeners[channel]
        if handler in handlers:
            handlers.remove(handler)
        else:
            raise PostgresListenerUnregistrationError(
                "Handler is not registered on that channel '%s'." % channel)
        if len(handlers) == 0:
            # Channels have already been registered. Unregister the channel.
            del self.listeners[channel]
        self.runChannelRegistrar()

    @synchronous
    def createConnection(self):
        """Create new database connection."""
        db = connections.databases[self.alias]
        backend = load_backend(db["ENGINE"])
        return backend.DatabaseWrapper(db, self.alias)

    @synchronous
    def startConnection(self):
        """Start the database connection."""
        self.connection = self.createConnection()
        self.connection.connect()
        self.connection.set_autocommit(True)
        self.connection.inc_thread_sharing()

    @synchronous
    def stopConnection(self):
        """Stop database connection."""
        # The connection is often in an unexpected state here -- for
        # unexplained reasons -- so be careful when unpeeling layers.
        connection_wrapper, self.connection = self.connection, None
        if connection_wrapper is not None:
            connection = connection_wrapper.connection
            if connection is not None and not connection.closed:
                connection_wrapper.dec_thread_sharing()
                connection_wrapper.commit()
                connection_wrapper.close()

    def tryConnection(self):
        """Keep retrying to make the connection."""
        if self.connecting is None:
            if self.disconnecting is not None:
                raise RuntimeError(
                    "Cannot attempt to make new connection before "
                    "pending disconnection has finished.")

            def cb_connect(_):
                self.log.info("Listening for database notifications.")

            def eb_connect(failure):
                self.log.error(
                    "Unable to connect to database: {error}",
                    error=failure.getErrorMessage(),
                )
                if failure.check(CancelledError):
                    return failure
                elif self.autoReconnect:
                    return deferLater(reactor, 3, connect)
                else:
                    return failure

            def connect(interval=self.HANDLE_NOTIFY_DELAY):
                d = deferToThread(self.startConnection)
                d.addCallback(callOut, self.runChannelRegistrar)
                d.addCallback(lambda result: self.channelRegistrarDone)
                d.addCallback(callOut, self.events.connected.fire)
                d.addCallback(callOut, self.startReading)
                d.addCallback(callOut, self.runHandleNotify, interval)
                # On failure ensure that the database connection is stopped.
                d.addErrback(callOut, deferToThread, self.stopConnection)
                d.addCallbacks(cb_connect, eb_connect)
                return d

            def done():
                self.connecting = None

            self.connecting = connect().addBoth(callOut, done)

        return self.connecting

    def loseConnection(self, reason=Failure(error.ConnectionDone())):
        """Request that the connection be dropped."""
        if self.disconnecting is None:
            self.registeredChannels.clear()
            d = self.disconnecting = Deferred()
            d.addBoth(callOut, self.stopReading)
            d.addBoth(callOut, self.cancelChannelRegistrar)
            d.addBoth(callOut, self.cancelHandleNotify)
            d.addBoth(callOut, deferToThread, self.stopConnection)
            d.addBoth(callOut, self.connectionLost, reason)

            def done():
                self.disconnecting = None

            d.addBoth(callOut, done)

            if self.connecting is None:
                # Already/never connected: begin shutdown now.
                self.disconnecting.callback(None)
            else:
                # Still connecting: cancel before disconnect.
                self.connecting.addErrback(suppress, CancelledError)
                self.connecting.chainDeferred(self.disconnecting)
                self.connecting.cancel()

        return self.disconnecting

    def connectionLost(self, reason):
        """Reconnect when the connection is lost."""
        self.connection = None
        if reason.check(error.ConnectionDone):
            self.log.debug("Connection closed.")
        elif reason.check(error.ConnectionLost):
            self.log.debug("Connection lost.")
        else:
            self.log.failure("Connection lost.", reason)
        if self.autoReconnect:
            reactor.callLater(3, self.tryConnection)
        self.events.disconnected.fire(reason)

    def registerChannel(self, channel):
        """Register the channel."""
        self.log.debug(f"Register Channel {channel}")
        with self._db_lock, self.connection.cursor() as cursor:
            if self.isSystemChannel(channel):
                # This is a system channel, so LISTEN is only issued once.
                cursor.execute("LISTEN %s;" % channel)
            else:
                # Not a system channel, so LISTEN is issued once per action.
                for action in sorted(map_enum(ACTIONS).values()):
                    cursor.execute("LISTEN %s_%s;" % (channel, action))

    def unregisterChannel(self, channel):
        """Unregister the channel."""
        self.log.debug(f"Unregister Channel {channel}")
        with self._db_lock, self.connection.cursor() as cursor:
            if self.isSystemChannel(channel):
                # This is a system channel, so UNLISTEN is only issued once.
                cursor.execute("UNLISTEN %s;" % channel)
            else:
                # Not a system channel, so UNLISTEN is issued once per action.
                for action in sorted(map_enum(ACTIONS).values()):
                    cursor.execute("UNLISTEN %s_%s;" % (channel, action))

    async def registerChannels(self):
        """Listen/unlisten to channels that were registered/unregistered.

        When a call to register() or unregister() is made, the listeners
        dict is updated, and the keys of that dict represents all the
        channels that we should listen to.

        The service keeps a list of channels that it already listens to
        in the registeredChannels dict. We issue a call to postgres to
        listen to all channels that are in listeners but not in
        registeredChannels, and a call to unlisten for all channels that
        are in registeredChannels but not in listeners.
        """
        to_register = set(self.listeners).difference(self.registeredChannels)
        to_unregister = self.registeredChannels.difference(self.listeners)
        # If there's nothing to do, we can stop the loop. If there is
        # any work to be done, we do the work, and then check
        # whether we should stop at the beginning of the next loop
        # iteration. The reason is that every time we yield, another
        # deferred might call register() or unregister().
        if not to_register and not to_unregister:
            self.channelRegistrar.stop()
        else:
            for channel in to_register:
                await deferToThread(self.registerChannel, channel)
                self.registeredChannels.add(channel)
            for channel in to_unregister:
                await deferToThread(self.unregisterChannel, channel)
                self.registeredChannels.remove(channel)

    def convertChannel(self, channel):
        """Convert the postgres channel to a registered channel and action.

        :raise PostgresListenerNotifyError: When {channel} is not registered or
            {action} is not in `ACTIONS`.
        """
        channel, action = channel.split("_", 1)
        if channel not in self.listeners:
            raise PostgresListenerNotifyError(
                "%s is not a registered channel." % channel)
        if action not in map_enum(ACTIONS).values():
            raise PostgresListenerNotifyError("%s action is not supported." %
                                              action)
        return channel, action

    def runChannelRegistrar(self):
        """Start the loop for listening to channels in postgres.

        It will only start if the service is connected to postgres.
        """
        if self.connection is not None and not self.channelRegistrar.running:
            self.channelRegistrarDone = self.channelRegistrar.start(
                self.CHANNEL_REGISTRAR_DELAY, now=True)

    def cancelChannelRegistrar(self):
        """Stop the loop for listening to channels in postgres."""
        if self.channelRegistrar.running:
            self.channelRegistrar.stop()
            return self.channelRegistrarDone
        else:
            return succeed(None)

    def runHandleNotify(self, delay=0, clock=reactor):
        """Defer later the `handleNotify`."""
        if not self.notifier.running:
            self.notifierDone = self.notifier.start(delay, now=False)

    def cancelHandleNotify(self):
        """Cancel the deferred `handleNotify` call."""
        if self.notifier.running:
            self.notifier.stop()
            return self.notifierDone
        else:
            return succeed(None)

    def handleNotifies(self, clock=reactor):
        """Process all notify message in the notifications set."""
        def gen_notifications(notifications):
            while len(notifications) != 0:
                yield notifications.pop()

        return task.coiterate(
            self.handleNotify(notification, clock=clock)
            for notification in gen_notifications(self.notifications))

    def handleNotify(self, notification, clock=reactor):
        """Process a notify message in the notifications set."""
        channel, payload = notification
        try:
            channel, action = self.convertChannel(channel)
        except PostgresListenerNotifyError:
            # Log the error and continue processing the remaining
            # notifications.
            self.log.failure("Failed to convert channel {channel!r}.",
                             channel=channel)
        else:
            defers = []
            handlers = self.listeners[channel]
            # XXX: There could be an arbitrary number of listeners. Should we
            # limit concurrency here? Perhaps even do one at a time.
            for handler in handlers:
                d = defer.maybeDeferred(handler, action, payload)
                d.addErrback(lambda failure: self.log.failure(
                    "Failure while handling notification to {channel!r}: "
                    "{payload!r}",
                    failure,
                    channel=channel,
                    payload=payload,
                ))
                defers.append(d)
            return defer.DeferredList(defers)

    def _process_notifies(self):
        """Add each notify to to the notifications set.

        This removes duplicate notifications when one entity in the database is
        updated multiple times in a short interval. Accumulating notifications
        and allowing the listener to pick them up in batches is imperfect but
        good enough, and simple.

        """
        notifies = self.connection.connection.notifies
        for notify in notifies:
            if self.isSystemChannel(notify.channel):
                # System level message; pass it to the registered
                # handler immediately.
                if notify.channel in self.listeners:
                    # Be defensive in that if a handler does not exist
                    # for this channel then the channel should be
                    # unregistered and removed from listeners.
                    if len(self.listeners[notify.channel]) > 0:
                        handler = self.listeners[notify.channel][0]
                        handler(notify.channel, notify.payload)
                    else:
                        self.unregisterChannel(notify.channel)
                        del self.listeners[notify.channel]
                else:
                    # Unregister the channel since no listener is
                    # registered for this channel.
                    self.unregisterChannel(notify.channel)
            else:
                # Place non-system messages into the queue to be
                # processed.
                self.notifications.add((notify.channel, notify.payload))
        # Delete the contents of the connection's notifies list so
        # that we don't process them a second time.
        del notifies[:]
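
The service above is, at bottom, a reactor-friendly wrapper around PostgreSQL's LISTEN/NOTIFY. A bare psycopg2 sketch of the same mechanism (database credentials and channel name are hypothetical):

import select
import psycopg2
import psycopg2.extensions

conn = psycopg2.connect(dbname='maasdb', user='maas')  # hypothetical credentials
conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)

with conn.cursor() as cursor:
    cursor.execute("LISTEN machine_create;")           # hypothetical channel

while True:
    # Block until the socket is readable, then drain pending notifications.
    select.select([conn], [], [], 5.0)
    conn.poll()
    while conn.notifies:
        notify = conn.notifies.pop(0)
        print(notify.channel, notify.payload)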
Example #31
class SolidityCompiler:

    __default_contract_version = 'v0.0.0'
    __default_contract_dir = os.path.join(dirname(abspath(__file__)), 'source')

    __compiled_contracts_dir = 'contracts'
    __zeppelin_library_dir = 'zeppelin'

    optimization_runs = 200

    class CompilerError(Exception):
        pass

    class VersionError(Exception):
        pass

    @classmethod
    def default_contract_dir(cls):
        return cls.__default_contract_dir

    def __init__(self,
                 solc_binary_path: str = None,
                 source_dirs: List[SourceDirs] = None,
                 ignore_solidity_check: bool = False) -> None:

        self.log = Logger('solidity-compiler')
        self._set_solc_binary_path(solc_binary_path)
        if not ignore_solidity_check:
            self._check_compiler_version()

        if source_dirs is None or len(source_dirs) == 0:
            self.source_dirs = [
                SourceDirs(root_source_dir=self.__default_contract_dir)
            ]
        else:
            self.source_dirs = source_dirs

    def _set_solc_binary_path(self, solc_binary_path: str):
        # Compiler binary and root solidity source code directory
        self.__sol_binary_path = solc_binary_path
        if self.__sol_binary_path is None:
            self.__sol_binary_path = shutil.which('solc')
        if self.__sol_binary_path is None:
            bin_path = os.path.dirname(sys.executable)  # type: str
            self.__sol_binary_path = os.path.join(bin_path,
                                                  'solc')  # type: str

    def _check_compiler_version(self):
        from solc import get_solc_version_string
        raw_solc_version_string = get_solc_version_string(
            solc_binary=self.__sol_binary_path)
        solc_version_search = re.search(
            r"""
             Version:\s          # Beginning of the string
             (\d+\.\d+\.\d+)     # Capture digits of version
             \S+                 # Skip other info in version       
             """, raw_solc_version_string, re.VERBOSE)
        if not solc_version_search:
            raise SolidityCompiler.VersionError(
                f"Can't parse solidity version: {raw_solc_version_string}")
        solc_version = solc_version_search.group(1)
        if solc_version != SOLIDITY_COMPILER_VERSION:
            raise SolidityCompiler.VersionError(
                f"Solidity version {solc_version} is unsupported. "
                f"Use {SOLIDITY_COMPILER_VERSION}, or pass "
                f"ignore_solidity_check=True to skip this check."
            )

    def compile(self) -> dict:
        interfaces = dict()
        for root_source_dir, other_source_dirs in self.source_dirs:
            if root_source_dir is None:
                self.log.warn("One of the root directories is None")
                continue

            raw_interfaces = self._compile(root_source_dir, other_source_dirs)
            for name, data in raw_interfaces.items():
                # Extract contract version from docs
                version_search = re.search(
                    r"""
                    \"details\":  # @dev tag in contract docs
                    \".*?         # Skip any data at the beginning of details
                    \|            # Beginning of version definition |
                    (v            # Capture version starting from symbol v
                    \d+           # At least one digit of major version
                    \.            # Digits splitter
                    \d+           # At least one digit of minor version
                    \.            # Digits splitter
                    \d+           # At least one digit of patch
                    )             # End of capturing
                    \|            # End of version definition |
                    .*?\"         # Skip any data at the end of details
                    """, data['devdoc'], re.VERBOSE)
                version = version_search.group(
                    1) if version_search else self.__default_contract_version
                try:
                    existence_data = interfaces[name]
                except KeyError:
                    existence_data = dict()
                    interfaces.update({name: existence_data})
                if version not in existence_data:
                    existence_data.update({version: data})
        return interfaces

    def _compile(self, root_source_dir: str, other_source_dirs: List[str]) -> dict:
        """Executes the compiler with parameters specified in the json config"""

        self.log.info("Using solidity compiler binary at {}".format(
            self.__sol_binary_path))
        contracts_dir = os.path.join(root_source_dir,
                                     self.__compiled_contracts_dir)
        self.log.info(
            "Compiling solidity source files at {}".format(contracts_dir))

        source_paths = set()
        source_walker = os.walk(top=contracts_dir, topdown=True)
        if other_source_dirs is not None:
            for source_dir in other_source_dirs:
                other_source_walker = os.walk(top=source_dir, topdown=True)
                source_walker = itertools.chain(source_walker,
                                                other_source_walker)

        for root, dirs, files in source_walker:
            for filename in files:
                if filename.endswith('.sol'):
                    path = os.path.join(root, filename)
                    source_paths.add(path)
                    self.log.debug(
                        "Collecting solidity source {}".format(path))

        # Compile with remappings: https://github.com/ethereum/py-solc
        zeppelin_dir = os.path.join(root_source_dir,
                                    self.__zeppelin_library_dir)

        remappings = (
            "contracts={}".format(contracts_dir),
            "zeppelin={}".format(zeppelin_dir),
        )

        self.log.info("Compiling with import remappings {}".format(
            ", ".join(remappings)))

        optimization_runs = self.optimization_runs
        from solc import compile_files
        from solc.exceptions import SolcError
        try:
            compiled_sol = compile_files(source_files=source_paths,
                                         solc_binary=self.__sol_binary_path,
                                         import_remappings=remappings,
                                         allow_paths=root_source_dir,
                                         optimize=True,
                                         optimize_runs=optimization_runs)

            self.log.info(
                "Successfully compiled {} contracts with {} optimization runs".
                format(len(compiled_sol), optimization_runs))

        except FileNotFoundError:
            raise RuntimeError(
                "The solidity compiler is not at the specified path. "
                "Check that the file exists and is executable.")
        except PermissionError:
            raise RuntimeError(
                "The solidity compiler binary at {} is not executable. "
                "Check the file's permissions.".format(self.__sol_binary_path))

        except SolcError:
            raise

        # Cleanup the compiled data keys
        interfaces = {
            name.split(':')[-1]: compiled_sol[name]
            for name in compiled_sol
        }
        return interfaces
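A hedged usage sketch for the class above: the call pattern follows its public surface, but the environment (an installed solc binary and the bundled contract sources) is assumed, so treat this as illustrative rather than a guaranteed run.

compiler = SolidityCompiler(ignore_solidity_check=True)  # skip the solc version pin
interfaces = compiler.compile()
for contract_name, versions in interfaces.items():
    for version, data in versions.items():
        print(contract_name, version, sorted(data))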
Example #32
class Learner:
    """
    Any participant in the "learning loop" - a class inheriting from
    this one has the ability, synchronously or asynchronously,
    to learn about nodes in the network, verify some essential
    details about them, and store information about them for later use.
    """

    _SHORT_LEARNING_DELAY = 5
    _LONG_LEARNING_DELAY = 90
    LEARNING_TIMEOUT = 10
    _ROUNDS_WITHOUT_NODES_AFTER_WHICH_TO_SLOW_DOWN = 10

    # For Keeps
    __DEFAULT_NODE_STORAGE = ForgetfulNodeStorage
    __DEFAULT_MIDDLEWARE_CLASS = RestMiddleware

    LEARNER_VERSION = LEARNING_LOOP_VERSION
    node_splitter = BytestringSplitter(VariableLengthBytestring)
    version_splitter = BytestringSplitter((int, 2, {"byteorder": "big"}))
    tracker_class = FleetStateTracker

    invalid_metadata_message = "{} has invalid metadata.  Maybe its stake is over?  Or maybe it is transitioning to a new interface.  Ignoring."
    unknown_version_message = "{} purported to be of version {}, but we're only version {}.  Is there a new version of NuCypher?"
    really_unknown_version_message = "Unable to glean address from node that perhaps purported to be version {}.  We're only version {}."
    fleet_state_icon = ""

    class NotEnoughNodes(RuntimeError):
        pass

    class NotEnoughTeachers(NotEnoughNodes):
        pass

    class UnresponsiveTeacher(ConnectionError):
        pass

    class NotATeacher(ValueError):
        """
        Raised when a character cannot be properly utilized because
        it does not have the proper attributes for learning or verification.
        """

    def __init__(
        self,
        domains: Set,
        network_middleware: RestMiddleware = __DEFAULT_MIDDLEWARE_CLASS(),
        start_learning_now: bool = False,
        learn_on_same_thread: bool = False,
        known_nodes: tuple = None,
        seed_nodes: Tuple[tuple] = None,
        node_storage=None,
        save_metadata: bool = False,
        abort_on_learning_error: bool = False,
        lonely: bool = False,
    ) -> None:

        self.log = Logger("learning-loop")  # type: Logger

        self.learning_domains = domains
        self.network_middleware = network_middleware
        self.save_metadata = save_metadata
        self.start_learning_now = start_learning_now
        self.learn_on_same_thread = learn_on_same_thread

        self._abort_on_learning_error = abort_on_learning_error
        self._learning_listeners = defaultdict(list)
        self._node_ids_to_learn_about_immediately = set()

        self.__known_nodes = self.tracker_class()

        self.lonely = lonely
        self.done_seeding = False

        # Read
        if node_storage is None:
            node_storage = self.__DEFAULT_NODE_STORAGE(
                federated_only=self.federated_only,
                # TODO: remove federated_only
                character_class=self.__class__)

        self.node_storage = node_storage
        if save_metadata and node_storage is NO_STORAGE_AVAILIBLE:
            raise ValueError(
                "Cannot save nodes without a configured node storage")

        known_nodes = known_nodes or tuple()
        self.unresponsive_startup_nodes = []  # TODO: Attempt to use these again later
        for node in known_nodes:
            try:
                self.remember_node(
                    node
                )  # TODO: Need to test this better - do we ever init an Ursula-Learner with Node Storage?
            except self.UnresponsiveTeacher:
                self.unresponsive_startup_nodes.append(node)

        self.teacher_nodes = deque()
        self._current_teacher_node = None  # type: Teacher
        self._learning_task = task.LoopingCall(self.keep_learning_about_nodes)
        self._learning_round = 0  # type: int
        self._rounds_without_new_nodes = 0  # type: int
        self._crashed = False  # set to a Failure by _crash_gracefully()
        self._seed_nodes = seed_nodes or []
        self.unresponsive_seed_nodes = set()

        if self.start_learning_now:
            self.start_learning_loop(now=self.learn_on_same_thread)

    @property
    def known_nodes(self):
        return self.__known_nodes

    def load_seednodes(self,
                       read_storages: bool = True,
                       retry_attempts: int = 3):  # TODO: why are these unused?
        """
        Engage known nodes from storages and pre-fetch hardcoded seednode certificates for node learning.
        """
        if self.done_seeding:
            self.log.debug("Already done seeding; won't try again.")
            return

        from nucypher.characters.lawful import Ursula
        for seednode_metadata in self._seed_nodes:

            self.log.debug("Seeding from: {}|{}:{}".format(
                seednode_metadata.checksum_public_address,
                seednode_metadata.rest_host, seednode_metadata.rest_port))

            seed_node = Ursula.from_seednode_metadata(
                seednode_metadata=seednode_metadata,
                network_middleware=self.network_middleware,
                federated_only=self.federated_only)  # TODO: 466
            if seed_node is False:
                self.unresponsive_seed_nodes.add(seednode_metadata)
            else:
                self.unresponsive_seed_nodes.discard(seednode_metadata)
                self.remember_node(seed_node)

        if not self.unresponsive_seed_nodes:
            self.log.info("Finished learning about all seednodes.")

        self.done_seeding = True

        if read_storages is True:
            self.read_nodes_from_storage()

        if not self.known_nodes:
            self.log.warn(
                "No seednodes were available after {} attempts".format(
                    retry_attempts))
            # TODO: Need some actual logic here for situation with no seed nodes (ie, maybe try again much later)

    def read_nodes_from_storage(self) -> set:
        stored_nodes = self.node_storage.all(
            federated_only=self.federated_only)  # TODO: 466
        for node in stored_nodes:
            self.remember_node(node)

    def remember_node(self,
                      node,
                      force_verification_check=False,
                      record_fleet_state=True):

        if node == self:  # No need to remember self.
            return False

        # First, determine if this is an outdated representation of an already known node.
        with suppress(KeyError):
            already_known_node = self.known_nodes[node.checksum_public_address]
            if not node.timestamp > already_known_node.timestamp:
                self.log.debug("Skipping already known node {}".format(
                    already_known_node))
                # This node is already known.  We can safely return.
                return False

        try:
            stranger_certificate = node.certificate
        except AttributeError:
            # Whoops, we got an Alice, Bob, or someone...
            raise self.NotATeacher(
                f"{node.__class__.__name__} does not have a certificate and cannot be remembered."
            )

        # Store node's certificate - It has been seen.
        certificate_filepath = self.node_storage.store_node_certificate(
            certificate=stranger_certificate)

        # In some cases (seed nodes or other temp stored certs),
        # this will update the filepath from the temp location to this one.
        node.certificate_filepath = certificate_filepath
        self.log.info(
            f"Saved TLS certificate for {node.nickname}: {certificate_filepath}"
        )

        try:
            node.verify_node(
                force=force_verification_check,
                network_middleware=self.network_middleware,
                accept_federated_only=self.federated_only,
                # TODO: 466 - move federated-only up to Learner?
            )
        except SSLError:
            return False  # TODO: Bucket this node as having bad TLS info - maybe it's an update that hasn't fully propagated?

        except NodeSeemsToBeDown:
            self.log.info(
                "No Response while trying to verify node {}|{}".format(
                    node.rest_interface, node))
            return False  # TODO: Bucket this node as "ghost" or something: somebody else knows about it, but we can't get to it.

        listeners = self._learning_listeners.pop(node.checksum_public_address,
                                                 tuple())
        address = node.checksum_public_address

        self.known_nodes[address] = node

        if self.save_metadata:
            self.node_storage.store_node_metadata(node=node)

        self.log.info("Remembering {} ({}), popping {} listeners.".format(
            node.nickname, node.checksum_public_address, len(listeners)))
        for listener in listeners:
            listener.add(address)
        self._node_ids_to_learn_about_immediately.discard(address)

        if record_fleet_state:
            self.known_nodes.record_fleet_state()

        return node

    def start_learning_loop(self, now=False):
        if self._learning_task.running:
            return False
        elif now:
            self.log.info("Starting Learning Loop NOW.")

            if self.lonely:
                self.done_seeding = True
                self.read_nodes_from_storage()

            else:
                self.load_seednodes()

            self.learn_from_teacher_node()
            self.learning_deferred = self._learning_task.start(
                interval=self._SHORT_LEARNING_DELAY)
            self.learning_deferred.addErrback(self.handle_learning_errors)
            return self.learning_deferred
        else:
            self.log.info("Starting Learning Loop.")

            learning_deferreds = list()
            if not self.lonely:
                seeder_deferred = deferToThread(self.load_seednodes)
                seeder_deferred.addErrback(self.handle_learning_errors)
                learning_deferreds.append(seeder_deferred)

            learner_deferred = self._learning_task.start(
                interval=self._SHORT_LEARNING_DELAY, now=now)
            learner_deferred.addErrback(self.handle_learning_errors)
            learning_deferreds.append(learner_deferred)

            self.learning_deferred = defer.DeferredList(learning_deferreds)
            return self.learning_deferred

    def stop_learning_loop(self, reason=None):
        """
        Only for tests at this point.  Maybe some day for graceful shutdowns.
        """
        self._learning_task.stop()

    def handle_learning_errors(self, *args, **kwargs):
        failure = args[0]
        if self._abort_on_learning_error:
            self.log.critical(
                "Unhandled error during node learning.  Attempting graceful crash."
            )
            reactor.callFromThread(self._crash_gracefully, failure=failure)
        else:
            cleaned_traceback = failure.getTraceback().replace(
                '{', '').replace('}', '')  # FIXME: Amazing.
            self.log.warn("Unhandled error during node learning: {}".format(
                cleaned_traceback))
            if not self._learning_task.running:
                self.start_learning_loop(
                )  # TODO: Consider a single entry point for this with more elegant pause and unpause.

    def _crash_gracefully(self, failure=None):
        """
        A facility for crashing more gracefully in the event that an exception
        is unhandled in a different thread, especially inside a loop like the learning loop.
        """
        self._crashed = failure
        failure.raiseException()
        # TODO: We don't actually have checksum_public_address at this level - maybe only Characters can crash gracefully :-)
        self.log.critical("{} crashed with {}".format(
            self.checksum_public_address, failure))

    def select_teacher_nodes(self):
        nodes_we_know_about = self.known_nodes.shuffled()

        if not nodes_we_know_about:
            raise self.NotEnoughTeachers(
                "Need some nodes to start learning from.")

        self.teacher_nodes.extend(nodes_we_know_about)

    def cycle_teacher_node(self):
        # To ensure that all the best teachers are available, first let's make sure
        # that we have connected to all the seed nodes.
        if self.unresponsive_seed_nodes and not self.lonely:
            self.log.info(
                "Still have unresponsive seed nodes; trying again to connect.")
            self.load_seednodes()  # Ideally, this is async and singular.

        if not self.teacher_nodes:
            self.select_teacher_nodes()
        try:
            self._current_teacher_node = self.teacher_nodes.pop()
        except IndexError:
            error = "Not enough nodes to select a good teacher, Check your network connection then node configuration"
            raise self.NotEnoughTeachers(error)
        self.log.info("Cycled teachers; New teacher is {}".format(
            self._current_teacher_node))

    def current_teacher_node(self, cycle=False):
        if cycle:
            self.cycle_teacher_node()

        if not self._current_teacher_node:
            self.cycle_teacher_node()

        teacher = self._current_teacher_node

        return teacher

    def learn_about_nodes_now(self, force=False):
        if self._learning_task.running:
            self._learning_task.reset()
            self._learning_task()
        elif not force:
            self.log.warn(
                "Learning loop isn't started; can't learn about nodes now.  You can override this with force=True."
            )
        elif force:
            self.log.info("Learning loop wasn't started; forcing start now.")
            self._learning_task.start(self._SHORT_LEARNING_DELAY, now=True)

    def keep_learning_about_nodes(self):
        """
        Continually learn about new nodes.
        """
        # TODO: Allow the user to set eagerness?
        self.learn_from_teacher_node(eager=False)

    def learn_about_specific_nodes(self, addresses: Set):
        self._node_ids_to_learn_about_immediately.update(addresses)  # hmmmm
        self.learn_about_nodes_now()

    # TODO: Dehydrate these next two methods.

    def block_until_number_of_known_nodes_is(
            self,
            number_of_nodes_to_know: int,
            timeout: int = 10,
            learn_on_this_thread: bool = False):
        start = maya.now()
        starting_round = self._learning_round

        while True:
            rounds_undertaken = self._learning_round - starting_round
            if len(self.__known_nodes) >= number_of_nodes_to_know:
                if rounds_undertaken:
                    self.log.info(
                        "Learned about enough nodes after {} rounds.".format(
                            rounds_undertaken))
                return True

            if not self._learning_task.running:
                self.log.warn(
                    "Blocking to learn about nodes, but learning loop isn't running."
                )
            if learn_on_this_thread:
                try:
                    self.learn_from_teacher_node(eager=True)
                except (requests.exceptions.ReadTimeout,
                        requests.exceptions.ConnectTimeout):
                    # TODO: Even this "same thread" logic can be done off the main thread.
                    self.log.warn(
                        "Teacher was unreachable.  No good way to handle this on the main thread."
                    )

            # The rest of the f*****g owl
            if (maya.now() - start).seconds > timeout:
                if not self._learning_task.running:
                    raise RuntimeError(
                        "Learning loop is not running.  Start it with start_learning()."
                    )
                else:
                    raise self.NotEnoughNodes(
                        "After {} seconds and {} rounds, didn't find {} nodes".
                        format(timeout, rounds_undertaken,
                               number_of_nodes_to_know))
            else:
                time.sleep(.1)

    def block_until_specific_nodes_are_known(self,
                                             addresses: Set,
                                             timeout=LEARNING_TIMEOUT,
                                             allow_missing=0,
                                             learn_on_this_thread=False):
        start = maya.now()
        starting_round = self._learning_round

        while True:
            if self._crashed:
                return self._crashed
            rounds_undertaken = self._learning_round - starting_round
            if addresses.issubset(self.known_nodes.addresses()):
                if rounds_undertaken:
                    self.log.info(
                        "Learned about all nodes after {} rounds.".format(
                            rounds_undertaken))
                return True

            if not self._learning_task.running:
                self.log.warn(
                    "Blocking to learn about nodes, but learning loop isn't running."
                )
            if learn_on_this_thread:
                self.learn_from_teacher_node(eager=True)

            if (maya.now() - start).seconds > timeout:

                still_unknown = addresses.difference(
                    self.known_nodes.addresses())

                if len(still_unknown) <= allow_missing:
                    return False
                elif not self._learning_task.running:
                    raise self.NotEnoughTeachers(
                        "The learning loop is not running.  Start it with start_learning()."
                    )
                else:
                    raise self.NotEnoughTeachers(
                        "After {} seconds and {} rounds, didn't find these {} nodes: {}"
                        .format(timeout, rounds_undertaken, len(still_unknown),
                                still_unknown))
            else:
                time.sleep(.1)

    def _adjust_learning(self, node_list):
        """
        Takes a list of new nodes, adjusts learning accordingly.

        Currently, simply slows down learning loop when no new nodes have been discovered in a while.
        TODO: Do other important things - scrub, bucket, etc.
        """
        if node_list:
            self._rounds_without_new_nodes = 0
            self._learning_task.interval = self._SHORT_LEARNING_DELAY
        else:
            self._rounds_without_new_nodes += 1
            if self._rounds_without_new_nodes > self._ROUNDS_WITHOUT_NODES_AFTER_WHICH_TO_SLOW_DOWN:
                self.log.info(
                    "After {} rounds with no new nodes, it's time to slow down to {} seconds."
                    .format(
                        self._ROUNDS_WITHOUT_NODES_AFTER_WHICH_TO_SLOW_DOWN,
                        self._LONG_LEARNING_DELAY))
                self._learning_task.interval = self._LONG_LEARNING_DELAY

    def _push_certain_newly_discovered_nodes_here(self, queue_to_push,
                                                  node_addresses):
        """
        If any node_addresses are discovered, push them to queue_to_push.
        """
        for node_address in node_addresses:
            self.log.info("Adding listener for {}".format(node_address))
            self._learning_listeners[node_address].append(queue_to_push)

    def network_bootstrap(self, node_list: list) -> None:
        for node_addr, port in node_list:
            new_nodes = self.learn_about_nodes_now(node_addr, port)
            self.__known_nodes.update(new_nodes)

    def get_nodes_by_ids(self, node_ids):
        for node_id in node_ids:
            try:
                # Scenario 1: We already know about this node.
                return self.__known_nodes[node_id]
            except KeyError:
                raise NotImplementedError
        # Scenario 2: We don't know about this node, but a nearby node does.
        # TODO: Build a concurrent pool of lookups here.

        # Scenario 3: We don't know about this node, and neither does our friend.

    def write_node_metadata(self, node, serializer=bytes) -> str:
        return self.node_storage.store_node_metadata(node=node)

    def learn_from_teacher_node(self, eager=True):
        """
        Sends a request to node_url to find out about known nodes.
        """
        self._learning_round += 1

        try:
            current_teacher = self.current_teacher_node()
        except self.NotEnoughTeachers as e:
            self.log.warn("Can't learn right now: {}".format(e.args[0]))
            return

        if Teacher in self.__class__.__bases__:
            announce_nodes = [self]
        else:
            announce_nodes = None

        unresponsive_nodes = set()
        try:
            # TODO: Streamline path generation
            certificate_filepath = self.node_storage.generate_certificate_filepath(
                checksum_address=current_teacher.checksum_public_address)
            response = self.network_middleware.get_nodes_via_rest(
                node=current_teacher,
                nodes_i_need=self._node_ids_to_learn_about_immediately,
                announce_nodes=announce_nodes,
                fleet_checksum=self.known_nodes.checksum)
        except NodeSeemsToBeDown as e:
            unresponsive_nodes.add(current_teacher)
            self.log.info("Bad Response from teacher: {}:{}.".format(
                current_teacher, e))
            return
        finally:
            self.cycle_teacher_node()

        #
        # Before we parse the response, let's handle some edge cases.
        if response.status_code == 204:
            # In this case, this node knows about no other nodes.  Hopefully we've taught it something.
            if response.content == b"":
                return NO_KNOWN_NODES
            # In the other case - where the status code is 204 but the response isn't blank - we'll keep parsing.
            # It's possible that our fleet states match, and we'll check for that later.

        elif response.status_code != 200:
            self.log.info("Bad response from teacher {}: {} - {}".format(
                current_teacher, response, response.content))
            return

        try:
            signature, node_payload = signature_splitter(response.content,
                                                         return_remainder=True)
        except BytestringSplittingError as e:
            self.log.warn(e.args[0])
            return

        try:
            self.verify_from(current_teacher,
                             node_payload,
                             signature=signature)
        except current_teacher.InvalidSignature:
            # TODO: What to do if the teacher improperly signed the node payload?
            raise
        # End edge case handling.
        #

        fleet_state_checksum_bytes, fleet_state_updated_bytes, node_payload = FleetStateTracker.snapshot_splitter(
            node_payload, return_remainder=True)
        current_teacher.last_seen = maya.now()
        # TODO: This is weird - let's get a stranger FleetState going.
        checksum = fleet_state_checksum_bytes.hex()

        # TODO: This doesn't make sense - a decentralized node can still learn about a federated-only node.
        from nucypher.characters.lawful import Ursula
        if constant_or_bytes(node_payload) is FLEET_STATES_MATCH:
            current_teacher.update_snapshot(
                checksum=checksum,
                updated=maya.MayaDT(
                    int.from_bytes(fleet_state_updated_bytes,
                                   byteorder="big")),
                number_of_known_nodes=len(self.known_nodes))
            return FLEET_STATES_MATCH

        node_list = Ursula.batch_from_bytes(
            node_payload, federated_only=self.federated_only)  # TODO: 466

        current_teacher.update_snapshot(checksum=checksum,
                                        updated=maya.MayaDT(
                                            int.from_bytes(
                                                fleet_state_updated_bytes,
                                                byteorder="big")),
                                        number_of_known_nodes=len(node_list))

        new_nodes = []
        for node in node_list:
            if GLOBAL_DOMAIN not in self.learning_domains:
                if not set(self.learning_domains).intersection(
                        set(node.serving_domains)):
                    continue  # This node is not serving any of our domains.

            # First, determine if this is an outdated representation of an already known node.
            with suppress(KeyError):
                already_known_node = self.known_nodes[
                    node.checksum_public_address]
                if not node.timestamp > already_known_node.timestamp:
                    self.log.debug("Skipping already known node {}".format(
                        already_known_node))
                    # This node is already known.  We can safely continue to the next.
                    continue

            certificate_filepath = self.node_storage.store_node_certificate(
                certificate=node.certificate)

            try:
                if eager:
                    node.verify_node(
                        self.network_middleware,
                        accept_federated_only=self.federated_only,  # TODO: 466
                        certificate_filepath=certificate_filepath)
                    self.log.debug("Verified node: {}".format(
                        node.checksum_public_address))

                else:
                    node.validate_metadata(
                        accept_federated_only=self.federated_only)  # TODO: 466
            # This block is a mess of eagerness.  This can all be done better lazily.
            except NodeSeemsToBeDown as e:
                self.log.info(
                    f"Can't connect to {node} to verify it right now.")
            except node.InvalidNode:
                # TODO: Account for possibility that stamp, rather than interface, was bad.
                self.log.warn(node.invalid_metadata_message.format(node))
            except node.SuspiciousActivity:
                message = "Suspicious Activity: Discovered node with bad signature: {}.  " \
                          "Propagated by: {}".format(current_teacher.checksum_public_address, teacher_uri)
                self.log.warn(message)
            else:
                new = self.remember_node(node, record_fleet_state=False)
                if new:
                    new_nodes.append(node)

        self._adjust_learning(new_nodes)

        learning_round_log_message = "Learning round {}.  Teacher: {} knew about {} nodes, {} were new."
        self.log.info(
            learning_round_log_message.format(self._learning_round,
                                              current_teacher, len(node_list),
                                              len(new_nodes)), )
        if new_nodes:
            self.known_nodes.record_fleet_state()
            for node in new_nodes:
                self.node_storage.store_node_certificate(
                    certificate=node.certificate)
        return new_nodes
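Learner is a mixin and cannot be trivially instantiated here, so the following self-contained sketch isolates just the back-off policy implemented by _adjust_learning() above: after enough rounds with no new nodes, the polling interval stretches from the short delay to the long one. The constants mirror the class attributes; the round data is hypothetical.

SHORT_DELAY, LONG_DELAY, SLOW_DOWN_AFTER = 5, 90, 10  # mirrors the class constants

interval, rounds_without_new_nodes = SHORT_DELAY, 0
rounds = [['some.node']] + [[]] * 12   # one fruitful round, then silence
for new_nodes in rounds:
    if new_nodes:
        rounds_without_new_nodes = 0
        interval = SHORT_DELAY
    else:
        rounds_without_new_nodes += 1
        if rounds_without_new_nodes > SLOW_DOWN_AFTER:
            interval = LONG_DELAY

assert interval == LONG_DELAY  # 12 empty rounds > 10: slowed to the long delay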
Example #33
class Discord(IRCClient):

	nickname = "discord"
	realname = "Discord"
	username = "******"
	versionName = "Discord"
	versionNum = "0.01"

	magicFile = "true.txt"

	def __init__(self, accessList):
		self.logger = Logger(observer=textFileLogObserver(sys.stdout))

		self.accessList = [nick.lower() for nick in accessList]

		if not os.path.exists(self.magicFile):
			self.logger.info("Creating magic file")

			try:
				with open(self.magicFile, "a"):
					pass

			except Exception as ex:
				self.logger.error("Unable to create magic file! {0}".format(ex.message))
				reactor.stop()

		self.markovGenerator = pymarkov.MarkovChainGenerator(self.magicFile)

		self.channels = []
		self.channelPhrasers = {}

		self.logger.debug("Discord initialized")

		# Maybe add hook/plugin system here?

		self.commands = Commands.Commands(self)		

	def removeChannel(self, channel):
		try:
			self.channels.remove(channel)

			self.channelPhrasers[channel].stop()
			
			del self.channelPhrasers[channel]

		except:
			self.logger.error("Error removing {channel} from collection", channel=channel)

	def insertPhrase(self, phrase):
		try:
			with open(self.magicFile, "a") as magicFile:
				magicFile.write("{0}\n".format(phrase))

			try:
				file, ext = os.path.splitext(self.magicFile)
				os.remove("{0}-pickled{1}".format(file, ext))

				# Simply re-populating the dictionary isn't enough for some reason
				self.markovGenerator = pymarkov.MarkovChainGenerator(self.magicFile, 2)

			except IOError as ex:
				self.logger.error("Unable to delete pickled file. {0}".format(ex.message))			

		except Exception as ex:
			self.logger.error("Unable to insert phrase into magic file! {0}".format(ex.message))

	def kickedFrom(self, channel, kicker, message):
		self.removeChannel(channel)

		self.logger.info("Kicked from {channel} by {kicker}", channel=channel, kicker=kicker)

	def left(self, channel):
		self.removeChannel(channel)

		self.logger.info("Left {channel}", channel=channel)

	def handleMessage(self, user, channel, message):
		senderNickname = user.split("!")[0]

		if message.startswith("~reload") and senderNickname in self.accessList:
			self.logger.info("Reloading commands module")
			self.say(channel, "Reloading.")

			try:
				commandsModule = reload(Commands)
				self.commands = commandsModule.Commands(self)

			except Exception as ex:
				self.say(channel, "Failed to load commands module - {0}".format(ex.message))

		elif message.startswith("~"):
			# Don't log commands to the brain
			commandMessage = message[1:]

			self.commands.handleCommand(user, channel, commandMessage)

		else:
			self.logger.info("Adding {message!r} to brain", message=message)

			# Avoid storing anything with the bot's name in it.
			# str.strip() removes characters, not a substring, so use replace().
			brainMessage = message.replace(self.nickname, "")

			self.insertPhrase(brainMessage)

			try:
				randomPhrase = self.generateSentence()

				if self.nickname in message and channel.startswith("#") and self.channelPhrasers[channel].running:
					phrase = "{0}, {1}".format(senderNickname, randomPhrase)

					self.say(channel, phrase)

				elif channel == self.nickname:
					self.logger.debug("Sending message to {nickname}", nickname=senderNickname)

					self.msg(senderNickname, randomPhrase)

				else:
					pass

			except IndexError as generationError:
				self.logger.error(generationError.message)

	def privmsg(self, user, channel, message):
		self.logger.info("Received message from {user} in {channel}", user=user, channel=channel)

		# deferToThread(self.handleMessage, user, channel, message)
		self.handleMessage(user, channel, message)

	def signedOn(self):
		self.logger.info("Signed on")

		self.join("#bots")

	def joined(self, channel):
		self.channels.append(channel)

		self.logger.info("Joined channel {channel!r}", channel=channel)

		channelPhraser = LoopingCall(self.sayRandomPhrase, channel)
		reactor.callLater(2, channelPhraser.start, 600)

		self.channelPhrasers[channel] = channelPhraser

	def generateSentence(self):
		try:
			sentence = self.markovGenerator.generate_sentence()

			sentence = sentence.strip("<{0}>".format(self.nickname))
			sentence = sentence.strip(self.nickname)

			return sentence

		except (IndexError, ValueError) as ex:
			self.logger.error(ex.message)

	def sayRandomPhrase(self, channel):
		sentence = self.generateSentence()
		self.say(channel, sentence)
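A hedged sketch of wiring the bot above into a running client. The host, port, and access list are hypothetical, and the factory is the standard Twisted client pattern rather than anything prescribed by this example; it assumes the module's own imports (reactor, Commands, pymarkov) are in place.

from twisted.internet import protocol, reactor

class DiscordFactory(protocol.ClientFactory):
	def __init__(self, accessList):
		self.accessList = accessList

	def buildProtocol(self, addr):
		return Discord(self.accessList)

reactor.connectTCP('irc.example.net', 6667, DiscordFactory(['trustedop']))
reactor.run()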
Example #34
class StdoutEmitter:

    transport_serializer = str
    default_color = 'white'

    # sys.stdout.write() TODO: doesn't work well with click_runner's output capture
    default_sink_callable = partial(print, flush=True)

    def __init__(self, sink: Callable = None, verbosity: int = 1):

        self.name = self.__class__.__name__.lower()
        self.sink = sink or self.default_sink_callable
        self.verbosity = verbosity
        self.log = Logger(self.name)

    def clear(self):
        if self.verbosity >= 1:
            click.clear()

    def message(self,
                message: str,
                color: str = None,
                bold: bool = False,
                verbosity: int = 1):
        self.echo(message,
                  color=color or self.default_color,
                  bold=bold,
                  verbosity=verbosity)
        self.log.debug(message)

    def echo(self,
             message: str = None,
             color: str = None,
             bold: bool = False,
             nl: bool = True,
             verbosity: int = 0):
        if verbosity <= self.verbosity:
            click.secho(message=message,
                        fg=color or self.default_color,
                        bold=bold,
                        nl=nl)

    def banner(self, banner):
        if self.verbosity >= 1:
            click.echo(banner)

    def ipc(self, response: dict, request_id: int, duration):
        # WARNING: Do not log in this block
        if self.verbosity >= 1:
            for k, v in response.items():
                click.secho(message=f'{k} ...... {v}', fg=self.default_color)

    def error(self, e):
        if self.verbosity >= 1:
            e_str = str(e)
            click.echo(message=e_str)
            self.log.info(e_str)

    def get_stream(self, verbosity: int = 0):
        if verbosity <= self.verbosity:
            return click.get_text_stream('stdout')
        else:
            return null_stream()
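A short usage sketch for the emitter above: verbosity gates what reaches the terminal, while message() also mirrors output to the Twisted log at debug level. The strings are hypothetical.

emitter = StdoutEmitter(verbosity=1)
emitter.banner("=== demo ===")              # shown: verbosity >= 1
emitter.message("hello", color='green')     # shown and logged at debug
emitter.echo("fine detail", verbosity=2)    # suppressed: 2 > 1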
Example #35
class XmppEvent(object):

    def __init__(self, nodeId, parent, pubsub_addr):
        self.log = Logger()
        self.nodeId = nodeId
        self.parent = parent
        self.addr = pubsub_addr

    def publish(self, event):

        if len(self.parent.active_controllers) == 0:
            #             self.log.debug('event cancelled')
            self.parent.registrations = []
            return

        def success(res):
            #             print('event sent')
            if res['type'] == 'error':
                self.log.error('Publish Event failed :%s' % res.toXml())
            else:
                if 'Id' in res.children[0].children[0]['node']:
                    self.log.debug('Event Published: %s' % res.toXml())
        name, data = event
        if name == 'Seconds':
            return
        iq = IQ(self.parent.xmlstream, 'set')
        ps = domish.Element(('http://jabber.org/protocol/pubsub', 'pubsub'))
        publish = domish.Element((None, 'publish'))
        publish['node'] = '/'.join((self.nodeId, name))
        item = domish.Element((None, 'item'))
        propertyset = domish.Element(
            ('urn:schemas-upnp-org:event-1-0', 'propertyset'),
            localPrefixes={'e': 'urn:schemas-upnp-org:event-1-0'})
        prop = domish.Element((None, 'e:property'))
        evt = domish.Element((None, name))
        if isinstance(data.value, dict):
            ev = domish.Element((data.namespace, 'Event'))
            inst = domish.Element((None, 'InstanceID'))
            inst['val'] = '0'
            for k, v in data.value.items():
                if 'namespace' in v:
                    var = domish.Element((v['namespace'], k))
                else:
                    var = domish.Element((None, k))
                if 'attrib' in v:
                    attr = v['attrib']
                else:
                    attr = {}
                value = v['value']
                if isinstance(value, bool):
                    value = int(value)
                attr.update(
                    {'val': str(value)
                     .decode('utf-8')})
                for attname, attval in attr.items():
                    var[attname] = attval
                inst.addChild(var)
            ev.addChild(inst)
            evt.addChild(ev)
        else:
            #             print(str(data.value).decode('utf-8'))
            if isinstance(data.value, bool):
                data.value = int(data.value)
            evt.addContent(str(data.value).decode('utf-8'))
        prop.addChild(evt)
        propertyset.addChild(prop)
        item.addChild(propertyset)
        publish.addChild(item)
        ps.addChild(publish)
        iq.addChild(ps)
        iq.addCallback(success)
        iq.send(to=self.addr)
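To visualize the payload publish() assembles, here is a minimal runnable sketch, using the same domish API as the class above, of the UPnP propertyset that gets wrapped into the pubsub item. The variable name and value are hypothetical.

from twisted.words.xish import domish

propertyset = domish.Element(
    ('urn:schemas-upnp-org:event-1-0', 'propertyset'),
    localPrefixes={'e': 'urn:schemas-upnp-org:event-1-0'})
prop = domish.Element((None, 'e:property'))
evt = domish.Element((None, 'CurrentTemperature'))
evt.addContent('2000')
prop.addChild(evt)
propertyset.addChild(prop)
print(propertyset.toXml())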
Example #36
class NodeStorage(ABC):
    _name = NotImplemented
    _TYPE_LABEL = 'storage_type'
    NODE_SERIALIZER = binascii.hexlify
    NODE_DESERIALIZER = binascii.unhexlify
    TLS_CERTIFICATE_ENCODING = Encoding.PEM
    TLS_CERTIFICATE_EXTENSION = '.{}'.format(
        TLS_CERTIFICATE_ENCODING.name.lower())

    class NodeStorageError(Exception):
        pass

    class UnknownNode(NodeStorageError):
        pass

    class InvalidNodeCertificate(RuntimeError):
        """Raised when a TLS certificate is not a valid Teacher certificate."""

    def __init__(
        self,
        federated_only: bool,  # TODO# 466
        character_class=None,
        serializer: Callable = NODE_SERIALIZER,
        deserializer: Callable = NODE_DESERIALIZER,
        registry: BaseContractRegistry = None,
    ) -> None:

        from nucypher.characters.lawful import Ursula

        self.log = Logger(self.__class__.__name__)
        self.registry = registry
        self.serializer = serializer
        self.deserializer = deserializer
        self.federated_only = federated_only
        self.character_class = character_class or Ursula

    def __getitem__(self, item):
        return self.get(checksum_address=item,
                        federated_only=self.federated_only)

    def __setitem__(self, key, value):
        return self.store_node_metadata(node=value)

    def __delitem__(self, key):
        self.remove(checksum_address=key)

    def __iter__(self):
        return self.all(federated_only=self.federated_only)

    @property
    @abstractmethod
    def source(self) -> str:
        """Human readable source string"""
        return NotImplemented

    def _read_common_name(self, certificate: Certificate):
        x509 = OpenSSL.crypto.X509.from_cryptography(certificate)
        subject_components = x509.get_subject().get_components()
        common_name_as_bytes = subject_components[0][1]
        common_name_from_cert = common_name_as_bytes.decode()
        return common_name_from_cert

    def _write_tls_certificate(self,
                               certificate: Certificate,
                               host: str = None,
                               force: bool = True) -> str:

        # Read
        x509 = OpenSSL.crypto.X509.from_cryptography(certificate)
        subject_components = x509.get_subject().get_components()
        common_name_as_bytes = subject_components[0][1]
        common_name_on_certificate = common_name_as_bytes.decode()
        if not host:
            host = common_name_on_certificate

        try:
            pseudonym = certificate.subject.get_attributes_for_oid(
                NameOID.PSEUDONYM)[0]
        except IndexError:
            raise self.InvalidNodeCertificate(
                f"Missing checksum address on certificate for host '{host}'. "
                f"Does this certificate belong to an Ursula?")
        else:
            checksum_address = pseudonym.value

        if not is_checksum_address(checksum_address):
            raise self.InvalidNodeCertificate(
                "Invalid certificate wallet address encountered: {}".format(
                    checksum_address))

        # Validate
        # TODO: It's better for us to have checked this a while ago so that this situation is impossible.  #443
        if host and (host != common_name_on_certificate):
            raise ValueError(
                f"You passed a hostname ('{host}') that does not match the certificate's common name."
            )

        certificate_filepath = self.generate_certificate_filepath(
            checksum_address=checksum_address)
        certificate_already_exists = os.path.isfile(certificate_filepath)
        if force is False and certificate_already_exists:
            raise FileExistsError(
                'A TLS certificate already exists at {}.'.format(
                    certificate_filepath))

        # Write
        os.makedirs(os.path.dirname(certificate_filepath), exist_ok=True)
        with open(certificate_filepath, 'wb') as certificate_file:
            public_pem_bytes = certificate.public_bytes(
                self.TLS_CERTIFICATE_ENCODING)
            certificate_file.write(public_pem_bytes)

        self.log.debug(
            f"Saved TLS certificate for {checksum_address}: {certificate_filepath}"
        )

        return certificate_filepath

    @abstractmethod
    def store_node_certificate(self, certificate: Certificate) -> str:
        raise NotImplementedError

    @abstractmethod
    def store_node_metadata(self, node, filepath: str = None) -> str:
        """Save a single node's metadata and tls certificate"""
        raise NotImplementedError

    @abstractmethod
    def generate_certificate_filepath(self, checksum_address: str) -> str:
        raise NotImplementedError

    @abstractmethod
    def payload(self) -> dict:
        raise NotImplementedError

    @classmethod
    @abstractmethod
    def from_payload(cls, data: dict, *args, **kwargs) -> 'NodeStorage':
        """Instantiate a storage object from a dictionary"""
        raise NotImplementedError

    @abstractmethod
    def initialize(self):
        """One-time initialization steps to establish a node storage backend"""
        raise NotImplementedError

    @abstractmethod
    def all(self,
            federated_only: bool,
            certificates_only: bool = False) -> set:
        """Return s set of all stored nodes"""
        raise NotImplementedError

    @abstractmethod
    def get(self, checksum_address: str, federated_only: bool):
        """Retrieve a single stored node"""
        raise NotImplementedError

    @abstractmethod
    def remove(self, checksum_address: str) -> bool:
        """Remove a single stored node"""
        raise NotImplementedError

    @abstractmethod
    def clear(self) -> bool:
        """Remove all stored nodes"""
        raise NotImplementedError
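A hedged sketch of the smallest concrete subclass the ABC above permits: an in-memory store with certificate handling stubbed out. This is illustrative only, not one of the real storage backends.

class InMemoryNodeStorage(NodeStorage):
    _name = 'memory'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.__nodes = {}  # checksum_address -> node

    @property
    def source(self) -> str:
        return 'memory'

    def store_node_certificate(self, certificate) -> str:
        return ''  # no filesystem: nothing to persist

    def store_node_metadata(self, node, filepath: str = None) -> str:
        self.__nodes[node.checksum_public_address] = node
        return self._name

    def generate_certificate_filepath(self, checksum_address: str) -> str:
        return ''

    def payload(self) -> dict:
        return {self._TYPE_LABEL: self._name}

    @classmethod
    def from_payload(cls, data: dict, *args, **kwargs) -> 'NodeStorage':
        return cls(*args, **kwargs)

    def initialize(self):
        self.__nodes = {}

    def all(self, federated_only: bool, certificates_only: bool = False) -> set:
        return set(self.__nodes.values())

    def get(self, checksum_address: str, federated_only: bool):
        try:
            return self.__nodes[checksum_address]
        except KeyError:
            raise self.UnknownNode

    def remove(self, checksum_address: str) -> bool:
        return self.__nodes.pop(checksum_address, None) is not None

    def clear(self) -> bool:
        self.__nodes = {}
        return True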
Example #37
class TESSProtocol(LineOnlyReceiver):

    # So that we can patch it in tests with Clock.callLater ...
    callLater = reactor.callLater

    # -------------------------
    # Twisted Line Receiver API
    # -------------------------

    def __init__(self, namespace):
        '''Constructor (the line delimiter is left at the LineOnlyReceiver default)'''
        # LineOnlyReceiver.delimiter = b'\n'
        self.log = Logger(namespace=namespace)
        self._consumer = None
        self._paused = True
        self._stopped = False

    def connectionMade(self):
        self.log.debug("connectionMade()")

    def lineReceived(self, line):
        now = datetime.datetime.utcnow().replace(
            microsecond=0) + datetime.timedelta(seconds=0.5)
        line = line.decode('latin_1')  # from bytearray to string
        self.log.info("<== TESS-W [{l:02d}] {line}", l=len(line), line=line)
        handled, reading = self._handleUnsolicitedResponse(line, now)
        if handled:
            self._consumer.write(reading)

    # -----------------------
    # IPushProducer interface
    # -----------------------

    def stopProducing(self):
        """
        Stop producing data.
        """
        self._stopped = True

    def pauseProducing(self):
        """
        Pause producing data.
        """
        self._paused = True

    def resumeProducing(self):
        """
        Resume producing data.
        """
        self._paused = False

    def registerConsumer(self, consumer):
        '''
        This is not really part of the IPushProducer interface
        '''
        self._consumer = IConsumer(consumer)

    # =================
    # TESS Protocol API
    # =================

    def setContext(self, context):
        self.httpEndPoint = context

    @inlineCallbacks
    def writeZeroPoint(self, zero_point):
        '''
        Writes Zero Point to the device. 
        Asynchronous operation
        '''
        result = {}
        result['tstamp'] = datetime.datetime.utcnow().replace(
            microsecond=0) + datetime.timedelta(seconds=0.5)
        url = make_save_url(self.httpEndPoint)
        self.log.info("==> TESS-W [HTTP GET] {url}", url=url)
        params = [('cons', '{0:0.2f}'.format(zero_point))]
        resp = yield treq.get(url, params=params, timeout=4)
        text = yield treq.text_content(resp)
        self.log.info("<== TESS-W [HTTP GET] {url}", url=url)
        matchobj = GET_INFO['flash'].search(text)
        result['zp'] = float(matchobj.groups(1)[0])
        returnValue(result)

    @inlineCallbacks
    def readPhotometerInfo(self):
        '''
        Reads Info from the device. 
        Asynchronous operation
        '''
        result = {}
        result['tstamp'] = datetime.datetime.utcnow().replace(
            microsecond=0) + datetime.timedelta(seconds=0.5)
        url = make_state_url(self.httpEndPoint)
        self.log.info("==> TESS-W [HTTP GET] {url}", url=url)
        resp = yield treq.get(url, timeout=4)
        text = yield treq.text_content(resp)
        self.log.info("<== TESS-W [HTTP GET] {url}", url=url)
        matchobj = GET_INFO['name'].search(text)
        result['name'] = matchobj.groups(1)[0]
        matchobj = GET_INFO['mac'].search(text)
        result['mac'] = matchobj.groups(1)[0]
        matchobj = GET_INFO['zp'].search(text)
        result['zp'] = float(matchobj.groups(1)[0])
        matchobj = GET_INFO['firmware'].search(text)
        result['firmware'] = matchobj.groups(1)[0]
        returnValue(result)

    # --------------
    # Helper methods
    # --------------

    def _match_unsolicited(self, line):
        '''Returns matched command descriptor or None'''
        for regexp in UNSOLICITED_PATTERNS:
            matchobj = regexp.search(line)
            if matchobj:
                i = UNSOLICITED_PATTERNS.index(regexp)
                #self.log.debug("matched {pattern}", pattern=UNSOLICITED_RESPONSES[UNSOLICITED_PATTERNS.index(regexp)]['name'])
                return UNSOLICITED_RESPONSES[i], matchobj
        return None, None

    def _handleUnsolicitedResponse(self, line, tstamp):
        '''
        Handle unsolicited responses from zptess.
        Returns True if handled, False otherwise
        '''
        if self._paused or self._stopped:
            self.log.debug("Producer either paused({p}) or stopped({s})",
                           p=self._paused,
                           s=self._stopped)
            return False, None
        ur, matchobj = self._match_unsolicited(line)
        if not ur:
            return False, None
        reading = {}
        reading['tbox'] = float(matchobj.group(2)) / 100.0
        reading['tsky'] = float(matchobj.group(3)) / 100.0
        reading['zp'] = float(matchobj.group(4)) / 100.0
        reading['tstamp'] = tstamp
        if ur['name'] == 'Hz reading':
            reading['freq'] = float(matchobj.group(1)) / 1.0
            self.log.debug("Matched {name}", name=ur['name'])
        elif ur['name'] == 'mHz reading':
            reading['freq'] = float(matchobj.group(1)) / 1000.0
            self.log.debug("Matched {name}", name=ur['name'])
        else:
            return False, None
        return True, reading
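A hedged sketch of the consumer side for the push producer above: any object providing IConsumer's write() can be handed to registerConsumer(). The class and the commented driving code are hypothetical.

from zope.interface import implementer
from twisted.internet.interfaces import IConsumer

@implementer(IConsumer)
class PrintingConsumer(object):

    def registerProducer(self, producer, streaming):
        self.producer = producer

    def unregisterProducer(self):
        self.producer = None

    def write(self, reading):
        print('reading:', reading)

# protocol = TESSProtocol(namespace='tessw')
# protocol.registerConsumer(PrintingConsumer())
# protocol.resumeProducing()   # unsolicited readings now flow into write()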
Example #38
class LocalFileBasedNodeStorage(NodeStorage):
    _name = 'local'
    __METADATA_FILENAME_TEMPLATE = '{}.node'

    class NoNodeMetadataFileFound(FileNotFoundError, NodeStorage.UnknownNode):
        pass

    def __init__(self,
                 config_root: str = None,
                 storage_root: str = None,
                 metadata_dir: str = None,
                 certificates_dir: str = None,
                 *args,
                 **kwargs) -> None:

        super().__init__(*args, **kwargs)
        self.log = Logger(self.__class__.__name__)

        self.root_dir = storage_root
        self.metadata_dir = metadata_dir
        self.certificates_dir = certificates_dir
        self._cache_storage_filepaths(config_root=config_root)

    @property
    def source(self) -> str:
        """Human readable source string"""
        return self.root_dir

    @staticmethod
    def _generate_storage_filepaths(config_root: str = None,
                                    storage_root: str = None,
                                    metadata_dir: str = None,
                                    certificates_dir: str = None):

        storage_root = storage_root or os.path.join(
            config_root or DEFAULT_CONFIG_ROOT, 'known_nodes')
        metadata_dir = metadata_dir or os.path.join(storage_root, 'metadata')
        certificates_dir = certificates_dir or os.path.join(
            storage_root, 'certificates')

        payload = {
            'storage_root': storage_root,
            'metadata_dir': metadata_dir,
            'certificates_dir': certificates_dir
        }

        return payload

    def _cache_storage_filepaths(self, config_root: str = None):
        filepaths = self._generate_storage_filepaths(
            config_root=config_root,
            storage_root=self.root_dir,
            metadata_dir=self.metadata_dir,
            certificates_dir=self.certificates_dir)
        self.root_dir = filepaths['storage_root']
        self.metadata_dir = filepaths['metadata_dir']
        self.certificates_dir = filepaths['certificates_dir']

    #
    # Certificates
    #

    @validate_checksum_address
    def __get_certificate_filename(self, checksum_address: str):
        return '{}.{}'.format(checksum_address, Encoding.PEM.name.lower())

    def __get_certificate_filepath(self, certificate_filename: str) -> str:
        return os.path.join(self.certificates_dir, certificate_filename)

    @validate_checksum_address
    def generate_certificate_filepath(self, checksum_address: str) -> str:
        certificate_filename = self.__get_certificate_filename(
            checksum_address)
        certificate_filepath = self.__get_certificate_filepath(
            certificate_filename=certificate_filename)
        return certificate_filepath

    @validate_checksum_address
    def __read_tls_public_certificate(
            self,
            filepath: str = None,
            checksum_address: str = None) -> Certificate:
        """Deserialize an X509 certificate from a filepath"""
        if not bool(filepath) ^ bool(checksum_address):
            raise ValueError(
                "Either pass filepath or checksum_address; Not both.")

        if not filepath and checksum_address is not None:
            filepath = self.generate_certificate_filepath(checksum_address)

        try:
            with open(filepath, 'rb') as certificate_file:
                cert = x509.load_pem_x509_certificate(
                    certificate_file.read(), backend=default_backend())
                return cert
        except FileNotFoundError:
            raise FileNotFoundError(
                "No SSL certificate found at {}".format(filepath))

    #
    # Metadata
    #

    @validate_checksum_address
    def __generate_metadata_filepath(self,
                                     checksum_address: str,
                                     metadata_dir: str = None) -> str:
        metadata_path = os.path.join(
            metadata_dir or self.metadata_dir,
            self.__METADATA_FILENAME_TEMPLATE.format(checksum_address))
        return metadata_path

    def __read_metadata(self, filepath: str, federated_only: bool):

        from nucypher.characters.lawful import Ursula

        try:
            with open(filepath, "rb") as seed_file:
                seed_file.seek(0)
                node_bytes = self.deserializer(seed_file.read())
                node = Ursula.from_bytes(node_bytes)
        except FileNotFoundError:
            raise self.UnknownNode
        return node

    def __write_metadata(self, filepath: str, node):
        os.makedirs(os.path.dirname(filepath), exist_ok=True)
        with open(filepath, "wb") as f:
            f.write(self.serializer(bytes(node)))
        self.log.info(
            "Wrote new node metadata to filesystem {}".format(filepath))
        return filepath

    #
    # API
    #
    def all(self,
            federated_only: bool,
            certificates_only: bool = False) -> Set[Union[Any, Certificate]]:
        scan_dir = self.certificates_dir if certificates_only else self.metadata_dir
        filenames = os.listdir(scan_dir)
        self.log.info("Found {} known node files at {}".format(
            len(filenames), scan_dir))

        known_certificates = set()
        if certificates_only:
            for filename in filenames:
                certificate = self.__read_tls_public_certificate(
                    os.path.join(self.certificates_dir, filename))
                known_certificates.add(certificate)
            return known_certificates

        else:
            known_nodes = set()
            for filename in filenames:
                metadata_path = os.path.join(self.metadata_dir, filename)
                node = self.__read_metadata(
                    filepath=metadata_path,
                    federated_only=federated_only)  # TODO: 466
                known_nodes.add(node)
            return known_nodes

    @validate_checksum_address
    def get(self,
            checksum_address: str,
            federated_only: bool,
            certificate_only: bool = False):
        if certificate_only is True:
            certificate = self.__read_tls_public_certificate(
                checksum_address=checksum_address)
            return certificate
        metadata_path = self.__generate_metadata_filepath(
            checksum_address=checksum_address)
        node = self.__read_metadata(filepath=metadata_path,
                                    federated_only=federated_only)  # TODO: 466
        return node

    def store_node_certificate(self,
                               certificate: Certificate,
                               force: bool = True):
        certificate_filepath = self._write_tls_certificate(
            certificate=certificate, force=force)
        return certificate_filepath

    def store_node_metadata(self, node, filepath: str = None) -> str:
        address = node.checksum_address
        filepath = self.__generate_metadata_filepath(checksum_address=address,
                                                     metadata_dir=filepath)
        self.__write_metadata(filepath=filepath, node=node)
        return filepath

    def save_node(self, node, force) -> Tuple[str, str]:
        certificate_filepath = self.store_node_certificate(
            certificate=node.certificate, force=force)
        metadata_filepath = self.store_node_metadata(node=node)
        return metadata_filepath, certificate_filepath

    @validate_checksum_address
    def remove(self,
               checksum_address: str,
               metadata: bool = True,
               certificate: bool = True) -> None:

        if metadata is True:
            metadata_filepath = self.__generate_metadata_filepath(
                checksum_address=checksum_address)
            os.remove(metadata_filepath)
            self.log.debug(
                "Deleted {} from the filesystem".format(checksum_address))

        if certificate is True:
            certificate_filepath = self.generate_certificate_filepath(
                checksum_address=checksum_address)
            os.remove(certificate_filepath)
            self.log.debug(
                "Deleted {} from the filesystem".format(checksum_address))

        return

    def clear(self, metadata: bool = True, certificates: bool = True) -> None:
        """Forget all stored nodes and certificates"""
        def __destroy_dir_contents(path) -> None:
            try:
                paths_to_remove = os.listdir(path)
            except FileNotFoundError:
                return
            else:
                for file in paths_to_remove:
                    file_path = os.path.join(path, file)
                    if os.path.isfile(file_path):
                        os.unlink(file_path)

        if metadata is True:
            __destroy_dir_contents(self.metadata_dir)
        if certificates is True:
            __destroy_dir_contents(self.certificates_dir)

        return

    def payload(self) -> dict:
        payload = {
            'storage_type': self._name,
            'storage_root': self.root_dir,
            'metadata_dir': self.metadata_dir,
            'certificates_dir': self.certificates_dir
        }
        return payload

    @classmethod
    def from_payload(cls, payload: dict, *args,
                     **kwargs) -> 'LocalFileBasedNodeStorage':
        storage_type = payload[cls._TYPE_LABEL]
        if not storage_type == cls._name:
            raise cls.NodeStorageError(
                "Wrong storage type. got {}".format(storage_type))
        del payload['storage_type']

        return cls(*args, **payload, **kwargs)

    def initialize(self) -> bool:
        storage_dirs = (self.root_dir, self.metadata_dir,
                        self.certificates_dir)
        for storage_dir in storage_dirs:
            try:
                os.mkdir(storage_dir, mode=0o755)
            except FileExistsError:
                message = "There are pre-existing files at {}".format(
                    storage_dir)
                self.log.info(message)
            except FileNotFoundError:
                raise self.NodeStorageError(
                    "There is no existing configuration at {}".format(
                        storage_dir))

        return bool(
            all(
                map(os.path.isdir, (self.root_dir, self.metadata_dir,
                                    self.certificates_dir))))
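A hedged usage sketch (the config root path and the node object are placeholders, and any extra keyword arguments required by the NodeStorage base class are omitted):

storage = LocalFileBasedNodeStorage(config_root='/tmp/nucypher-demo')
storage.initialize()                       # creates root, metadata and certificate dirs
storage.store_node_metadata(node=ursula)   # `ursula` is a hypothetical node object
known = storage.all(federated_only=True)   # set of deserialized nodes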
Example #39
class AnalyzeGas:
    """
    Callable twisted log observer with built-in record-keeping for gas estimation runs.
    """

    # Logging
    LOG_NAME = 'estimate-gas'
    LOG_FILENAME = '{}.log.json'.format(LOG_NAME)
    OUTPUT_DIR = os.path.join(abspath(dirname(__file__)), 'results')
    JSON_OUTPUT_FILENAME = '{}.json'.format(LOG_NAME)

    _PATTERN = re.compile(
        r'''
                          ^          # Anchor at the start of a string
                          (.+)       # One or more characters; Captured
                          \s=\s      # Space-Equal-Space
                          (\d+)      # A sequence of digits; Captured
                          $          # Anchor at the end of the string
                          ''', re.VERBOSE)

    def __init__(self) -> None:

        self.log = Logger(self.__class__.__name__)
        self.gas_estimations = dict()

        if not os.path.isdir(self.OUTPUT_DIR):
            os.mkdir(self.OUTPUT_DIR)

    @provider(ILogObserver)
    def __call__(self, event, *args, **kwargs) -> None:

        if event.get('log_namespace') == self.LOG_NAME:
            message = event.get("log_format")

            matches = self._PATTERN.match(message)
            if not matches:
                self.log.debug("No match for {} with pattern {}".format(
                    message, self._PATTERN))
                return

            label, gas = matches.groups()
            self.paint_line(label, gas)
            self.gas_estimations[label] = int(gas)

    @staticmethod
    def paint_line(label: str, gas: str) -> None:
        print('{label} {gas:,}'.format(label=label.ljust(70, '.'),
                                       gas=int(gas)))

    def to_json_file(self) -> None:
        print('Saving JSON Output...')

        epoch_time = str(int(time.time()))
        timestamped_filename = '{}-{}'.format(epoch_time,
                                              self.JSON_OUTPUT_FILENAME)
        filepath = os.path.join(self.OUTPUT_DIR, timestamped_filename)
        with open(filepath, 'w') as file:
            file.write(json.dumps(self.gas_estimations, indent=4))

    def start_collection(self) -> None:
        print("Starting Data Collection...")

        json_filepath = os.path.join(self.OUTPUT_DIR, AnalyzeGas.LOG_FILENAME)
        json_io = io.open(json_filepath, "w")
        json_observer = jsonFileLogObserver(json_io)
        globalLogPublisher.addObserver(json_observer)
        globalLogPublisher.addObserver(self)
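A sketch of feeding the observer (the label and value are invented). Because __call__ matches against the raw log_format in the 'estimate-gas' namespace, each measurement must be emitted as a literal 'label = digits' string:

analyzer = AnalyzeGas()
analyzer.start_collection()
gas_logger = Logger(AnalyzeGas.LOG_NAME)     # namespace must equal LOG_NAME
gas_logger.info("ApprovedTransfer = 52000")  # hypothetical measurement line
analyzer.to_json_file()                      # persists {"ApprovedTransfer": 52000}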
Example #40
class RainContent(GenericContent):
    def __init__(self, endpoint, factory):
        self.log = Logger(self.__class__.__name__)
        super().__init__(endpoint, factory)
        self.task = None

    def onBrokerConnected(self):
        self.task = task.LoopingCall(self.createForecast)
        self.task.start(self.config['RAIN_UPDATE_FREQ'], now=True)

    def _logTimeout(self, failure):
        failure.trap(CancelledError)
        self.log.error("Timeout of request to %s" % self.config["RAIN_DATA_SOURCE"])

    def _logFailure(self, failure, url=None):
        # Accepts `url` because createForecast() adds it as an extra errback argument.
        self.log.error("reported failure: {message}", message=failure.getErrorMessage())
        return failure

    def _logSuccess(self, success, url):
        self.log.info("Success requesting {url}", url=url)
        return success

    def createForecast(self):
        url = self.config["RAIN_DATA_SOURCE"]
        self.log.debug("Grabbing rain forecast URL '%s'" % url)
        d = treq.get(url, timeout=5)
        d.addCallbacks(self.grab_http_response, self._logTimeout)
        d.addCallbacks(self.parse_forecast_results)
        d.addCallbacks(self.create_forcast)
        d.addCallbacks(self.publish_forcast)
        d.addCallback(self._logSuccess, url)
        d.addErrback(self._logFailure, url)

    def create_forcast(self, data):
        if data[0][0] == 0:  # It's currently dry
            for rain, t in data:
                if rain > 0:
                    return "Rain at %s" % t
            return None
        else:  # It's raining.
            for rain, t in data:
                if rain == 0:
                    return "Rain stop %s" % t
            return "Rain Rain Rain"

    def grab_http_response(self, response):
        if response.code != 200:
            raise RuntimeError("Status is not 200 but '%s'" % response.code)
        return readBody(response)

    def parse_forecast_results(self, content):
        raw_str = content.decode()
        raw_arr = []
        for raw in raw_str.split("\r\n"):
            if not raw:
                continue
            rain_value, hour = raw.split("|")
            raw_arr.append([int(rain_value, 10), hour])
        if len(raw_arr) == 0:
            raise RuntimeWarning("API results were not in the expected format. They were: {}".format(raw_str))
        return raw_arr

    def publish_forcast(self, forcast_string: str):
        def _logAll(*args):
            self.log.debug("all publishing complete args={args!r}", args=args)
        if forcast_string is None:  # There's no Rain information to show.
            return
        msg = TextSingleLineLayout()
        msg.text = forcast_string
        msg.duration = self.config["RAIN_DISPLAY_DURATION"]
        msg.program = 'rain'
        msg.font_size = 15
        d = self.publish(topic=LEDSLIE_TOPIC_TYPESETTER_1LINE, message=msg, qos=1)
        d.addCallbacks(_logAll, self._logFailure)
        return d
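A standalone re-check of the parsing and forecast logic above, using an invented two-row payload in the rain-value|time wire format implied by parse_forecast_results:

content = b"000|14:05\r\n020|14:10\r\n".decode()
rows = [row for row in content.split("\r\n") if row]
data = [[int(value, 10), hour] for value, hour in
        (row.split("|") for row in rows)]
assert data == [[0, "14:05"], [20, "14:10"]]
# With a dry first sample (data[0][0] == 0), create_forcast would
# return "Rain at 14:10" for this payload.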
Example #41
class Rest(object):

    def __init__(
            self,
            host='https://developer-api.nest.com',
            token=None,
            event_handler=None,
            net_type='lan'):
        self.log = Logger()
        self.host = host
        self.token = token
        self.event_handler = event_handler
        self.pool = HTTPConnectionPool(reactor, persistent=True)
        self.loc = None
        self.reconnect = False
        self.fail_count = 0
        if event_handler:
            self.reconnect = True
            d = self.request(headers={'User-Agent': ['onDemand Rest Client'],
                                      'Accept': ['text/event-stream']})
            d.addCallback(self.on_disconnect)

    def __getattr__(self, name):
        try:
            return super(Rest, self).__getattr__(name)
        except AttributeError:
            return RestCall(self, name)

    def on_disconnect(self, reason):
        if not reason:
            reason = {'reason': 'no_message'}
        self.log.critical(
            'disconnected: {reason}', reason=reason['reason'])
        if self.fail_count > 10:
            self.log.error('Max error count reached, aborting connection')
            return

        def test_connectivity(count):
            if self.fail_count == count:
                self.fail_count = 0

        self.fail_count += 1
        c = self.fail_count
        reactor.callLater(10, test_connectivity, c)  # @UndefinedVariable
        if self.reconnect:
            d = self.request(headers={'User-Agent': ['onDemand Rest Client'],
                                      'Accept': ['text/event-stream']})
            d.addCallback(self.on_disconnect)

    def request(self, method='GET',
                path='',
                headers=None,
                body=None):

        if headers is None:
            # Build the default per call: a shared default dict would be
            # mutated by the Content-Type update below.
            headers = {'User-Agent': ['onDemand/1.0 (Rest_Client)'],
                       'Accept': ['application/json']}
        data = None
        if self.loc:
            host = '/'.join((self.loc, path))
        else:
            host = '/'.join((self.host, path))
        if self.token:
            host += '?auth=' + self.token
        if body:
            headers.update({'Content-Type': ['application/json']})
            data = FileBodyProducer(StringIO(json.dumps(body)))
        agent = RedirectAgent(Agent(reactor, pool=self.pool))
        d = agent.request(method, host, Headers(headers), data)

        def cbFail(fail):

            if hasattr(fail.value, 'response'):
                if hasattr(fail.value.response, 'code'):
                    if fail.value.response.code == 307:
                        loc = fail.value.response.headers.getRawHeaders(
                            'location')
                        new = urlparse(loc[0])
                        newhost = '://'.join((new.scheme, new.netloc))
                        if newhost == self.host:
                            self.loc = None
                        else:
                            self.loc = newhost
                        self.log.debug('redirect: %s' % self.loc)
                        data = FileBodyProducer(StringIO(json.dumps(body)))
                        d = agent.request(
                            method, loc[0], Headers(headers), data)
                        d.addCallbacks(cbRequest, cbFail)
                        return d
                    elif fail.value.response.code == 404 and self.loc:
                        self.loc = None
                        host = '/'.join((self.host, path))
                        if self.token:
                            host += '?auth=' + self.token
                        d = self.request(method, host, Headers(headers), body)
                        d.addCallbacks(cbRequest, cbFail)
                        return d
                else:
                    print(dir(fail.value))
                    print(fail.value.message)
                    print(fail.value.args)

            self.log.error('unhandled failure: %s -- %s' % (
                fail.value.message, fail.value))

        def cbRequest(response):
            #  print 'Response version:', response.version
            #  print 'Response code:', response.code
            #  print 'Response phrase:', response.phrase
            #  print 'Response headers:'
            #  print pformat(list(response.headers.getAllRawHeaders()))
            finished = Deferred()
            response.deliverBody(RestHandle(finished, self.event_handler))
            return finished
        d.addCallbacks(cbRequest, cbFail)
        return d
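A hypothetical instantiation (the token is a placeholder): passing an event_handler makes the constructor open the persistent text/event-stream request, and on_disconnect re-issues it on failure:

def on_nest_event(data):
    # Invented handler; receives streamed event payloads.
    print('event:', data)

client = Rest(token='c.PLACEHOLDER', event_handler=on_nest_event)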
Example #42
class Modem(object):
    '''
    classdocs
    '''

    def __init__(self, protocol, event_fct=None):
        '''
        Constructor
        '''
        self.log = Logger()
        self.first = True
        self.event = event_fct
        self.callback = None
        self.wait = False
        self.response = []  # accumulated response lines; appended to in receive()
        self.protocol = protocol
        self.protocol.addCallback(self.receive)
        self.resp_re = re.compile(
                    r'^OK|ERROR|(\+CM[ES] ERROR: \d+)|(COMMAND NOT SUPPORT)$')
        
    def receive(self, line):
        if self.wait:
            if self.resp_re.match(line):
                self.wait = False
                self.response.append(line)
                if line.startswith('ERROR'):
                    self.log.critical('error from Modem: %s' % line)
                    if self.callback:
                        self.callback.errback(self.response)
                else:
                    if self.callback:
                        self.callback.callback(self.response)
                self.response = []
                if self.callback:
                    self.callback = None
            else:
                self.response.append(line)
        elif self.event:
            self.event(line)   
        else:
            self.log.debug('unmanaged message from Modem: %s' % line)
            
    def sendsms(self, recipient, message, callback_fct=None):
        def recipient_set(res):
            self.log.debug(
                'do we have > ? ==> %s' % ('OK' if res == '>' else 'No: ' + res))
            self.callback = Deferred()
            if callback_fct:
                self.callback.addCallback(callback_fct)
            self.wait = True
            self.protocol.write(message.encode() + b'\x1a')  # message assumed to be str
        def text_mode(res):
            self.callback = Deferred()
            self.callback.addCallback(recipient_set)
            self.wait = True
            self.protocol.write(b'AT+CMGS="' + recipient.encode() + b'"\r')
        def modem_init(res):
            self.first = False
            self.callback = Deferred()
            self.callback.addCallback(text_mode)
            self.wait = True
            self.protocol.write(b'AT+CMGF=1\r')
        if self.first:
            self.wait = True
            self.callback = Deferred()
            self.callback.addCallback(modem_init)
            self.protocol.write(b'ATZ\r')
        else:
            modem_init('OK')
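A hypothetical send (number, text and callback are invented); `proto` is assumed to be a connected serial line protocol exposing the write()/addCallback() interface used above:

def on_sent(response):
    print('modem replied:', response)

modem = Modem(proto)
modem.sendsms('+15551234567', 'Hello from onDemand', callback_fct=on_sent)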
Example #43
class AvailabilityTracker:

    FAST_INTERVAL = 15  # Seconds
    SLOW_INTERVAL = 60 * 2
    SEEDING_DURATION = 60
    MAXIMUM_ALONE_TIME = 120

    MAXIMUM_SCORE = 10.0  # Score
    SAMPLE_SIZE = 1  # Ursulas
    SENSITIVITY = 0.5  # Threshold
    CHARGE_RATE = 0.9  # Measurement Multiplier

    class Unreachable(RuntimeError):
        pass

    class Solitary(Unreachable):
        message = "Cannot connect to any teacher nodes."

    class Lonely(Unreachable):
        message = "Cannot connect to enough teacher nodes."

    def __init__(self, ursula, enforce_loneliness: bool = True):

        self.log = Logger(self.__class__.__name__)
        self._ursula = ursula
        self.enforce_loneliness = enforce_loneliness

        self.__excuses = dict()  # List of failure reasons
        self.__score = 10
        # 10 == Perfect Score
        self.warnings = {
            9: self.mild_warning,
            7: self.medium_warning,
            2: self.severe_warning,
            1: self.shutdown_everything  # 0 is unobtainable
        }

        self._start_time = None
        self.__active_measurement = False
        self.__task = LoopingCall(self.maintain)
        self.responders = set()

    @property
    def excuses(self):
        return self.__excuses

    def mild_warning(self) -> None:
        self.log.info(
            f'[UNREACHABLE NOTICE (SCORE {self.score})] This node was recently reported as unreachable.'
        )

    def medium_warning(self) -> None:
        self.log.warn(
            f'[UNREACHABLE CAUTION (SCORE {self.score})] This node is reporting as unreachable. '
            f'Please check your network and firewall configuration.')

    def severe_warning(self) -> None:
        self.log.warn(
            f'[UNREACHABLE WARNING (SCORE {self.score})] '
            f'Please check your network and firewall configuration. '
            f'Auto-shutdown will commence soon if the services do not become available.'
        )

    def shutdown_everything(self, reason=None, halt_reactor=False):
        self.log.warn(
            f'[NODE IS UNREACHABLE (SCORE {self.score})] Commencing auto-shutdown sequence...'
        )
        self._ursula.stop(halt_reactor=False)
        try:
            if reason:
                raise reason(reason.message)
            raise self.Unreachable(
                f'{self._ursula} is unreachable (scored {self.score}).')
        finally:
            if halt_reactor:
                self._halt_reactor()

    @staticmethod
    def _halt_reactor() -> None:
        if reactor.running:
            reactor.stop()

    def handle_measurement_errors(self,
                                  *args,
                                  crash_on_error: bool = False,
                                  **kwargs) -> None:
        # crash_on_error is keyword-only so that a Failure passed by an
        # errback lands in *args rather than overwriting the flag.

        if args:
            failure = args[0]
            cleaned_traceback = failure.getTraceback().replace(
                '{', '').replace('}', '')  # FIXME: Amazing.
            self.log.warn(
                "Unhandled error during availability check: {}".format(
                    cleaned_traceback))
            if crash_on_error:
                failure.raiseException()
        else:
            # Restart on failure
            if not self.running:
                self.log.debug(f"Availability check crashed, restarting...")
                self.start(now=True)

    def status(self) -> bool:
        """Returns current indication of availability"""
        result = self.score > (self.SENSITIVITY * self.MAXIMUM_SCORE)
        if not result:
            for time, reason in self.__excuses.items():
                self.log.info(f'[{time}] - {reason["error"]}')
        return result

    @property
    def running(self) -> bool:
        return self.__task.running

    def start(self, now: bool = False):
        if not self.running:
            self._start_time = maya.now()
            d = self.__task.start(interval=self.FAST_INTERVAL, now=now)
            d.addErrback(self.handle_measurement_errors)

    def stop(self) -> None:
        if self.running:
            self.__task.stop()

    def maintain(self) -> None:
        known_nodes_is_smaller_than_sample_size = len(
            self._ursula.known_nodes) < self.SAMPLE_SIZE

        # If there are no known nodes or too few known nodes, skip this round...
        # ... but not for longer than the maximum allotted alone time
        if known_nodes_is_smaller_than_sample_size:
            if not self._ursula.lonely and self.enforce_loneliness:
                now = maya.now().epoch
                delta = now - self._start_time.epoch
                if delta >= self.MAXIMUM_ALONE_TIME:
                    self.severe_warning()
                    reason = self.Solitary if not self._ursula.known_nodes else self.Lonely
                    self.shutdown_everything(reason=reason)
            return

        if self.__task.interval == self.FAST_INTERVAL:
            now = maya.now().epoch
            delta = now - self._start_time.epoch
            if delta >= self.SEEDING_DURATION:
                # Slow down
                self.__task.interval = self.SLOW_INTERVAL
                return

        if self.__active_measurement:
            self.log.debug(
                f"Availability check already in progress - skipping this round (Score: {self.score}). "
            )
            return  # Abort
        else:
            self.log.debug(
                f"Continuing to measure availability (Score: {self.score}).")
            self.__active_measurement = True

        try:
            self.measure_sample()
        finally:
            self.__active_measurement = False

        delta = maya.now() - self._start_time
        self.log.info(
            f"Current availability score is {self.score} measured since {delta}"
        )
        self.issue_warnings()

    def issue_warnings(self, cascade: bool = True) -> None:
        warnings = sorted(self.warnings.items(), key=lambda t: t[0])
        for threshold, action in warnings:
            if self.score <= threshold:
                action()
                if not cascade:
                    # Exit after the first active warning is issued
                    return

    def sample(self, quantity: int) -> list:
        population = tuple(self._ursula.known_nodes._nodes.values())
        ursulas = random.sample(population=population, k=quantity)
        return ursulas

    @property
    def score(self) -> float:
        return self.__score

    def record(self, result: bool = None, reason: dict = None) -> None:
        """Score the result and cache it."""
        if (not result) and reason:
            self.__excuses[maya.now().epoch] = reason
        if result is None:
            return  # Actually never mind, don't score this one...
        score = int(result) + self.CHARGE_RATE * self.__score
        if score >= self.MAXIMUM_SCORE:
            self.__score = self.MAXIMUM_SCORE
        else:
            self.__score = score
        self.log.debug(f"Recorded new uptime score ({self.score})")

    def measure_sample(self, ursulas: list = None) -> None:
        """
        Measure self-availability from a sample of Ursulas or automatically from known nodes.
        Handle the possibility of unreachable or invalid remote nodes in the sample.
        """

        # TODO: Relocate?
        Unreachable = (*NodeSeemsToBeDown, self._ursula.NotStaking,
                       self._ursula.node_storage.InvalidNodeCertificate,
                       self._ursula.network_middleware.UnexpectedResponse)

        if not ursulas:
            ursulas = self.sample(quantity=self.SAMPLE_SIZE)

        for ursula_or_sprout in ursulas:
            try:
                self.measure(ursula_or_sprout=ursula_or_sprout)
            except self._ursula.network_middleware.NotFound:
                # Ignore this measurement and move on because the remote node is not compatible.
                self.record(None,
                            reason={
                                "error":
                                "Remote node did not support 'ping' endpoint."
                            })
            except Unreachable as e:
                # This node is either not an Ursula, not available, does not support uptime checks, or is not staking...
                # ...do nothing and move on without changing the score.
                self.log.debug(
                    f'{ursula_or_sprout} responded to uptime check with {e.__class__.__name__}'
                )
                continue

    def measure(self, ursula_or_sprout: Union['Ursula', NodeSprout]) -> None:
        """Measure self-availability from a single remote node that participates uptime checks."""
        try:
            response = self._ursula.network_middleware.check_rest_availability(
                initiator=self._ursula, responder=ursula_or_sprout)
        except RestMiddleware.BadRequest as e:
            self.responders.add(ursula_or_sprout.checksum_address)
            self.record(False, reason=e.reason)
        else:
            # Record response
            self.responders.add(ursula_or_sprout.checksum_address)
            if response.status_code == 200:
                self.record(True)
            elif response.status_code == 400:
                self.record(
                    False,
                    reason={
                        'failed':
                        f"{ursula_or_sprout.checksum_address} reported unavailability."
                    })
            else:
                self.record(
                    None,
                    reason={
                        "error":
                        f"{ursula_or_sprout.checksum_address} returned {response.status_code} from 'ping' endpoint."
                    })
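The scoring in record() is a geometric decay: each failed check multiplies the score by CHARGE_RATE, each success adds 1 before the cap. A quick standalone illustration of five consecutive failures from a perfect score:

score = 10.0
for _ in range(5):
    score = 0 + 0.9 * score   # record(False): int(False) + CHARGE_RATE * score
    print(round(score, 2))    # 9.0, 8.1, 7.29, 6.56, 5.9
# The mild (<= 9) and then medium (<= 7) warnings fire as the score decays.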
Example #44
class SolidityCompiler:

    __default_compiler_version = 'v0.5.9'
    __default_contract_version = 'v0.0.0'
    __default_configuration_path = os.path.join(dirname(abspath(__file__)),
                                                './compiler.json')

    __default_sol_binary_path = shutil.which('solc')
    if __default_sol_binary_path is None:
        __bin_path = os.path.dirname(sys.executable)  # type: str
        __default_sol_binary_path = os.path.join(__bin_path,
                                                 'solc')  # type: str

    __default_contract_dir = os.path.join(dirname(abspath(__file__)), 'source')
    __default_chain_name = 'tester'

    __compiled_contracts_dir = 'contracts'
    __zeppelin_library_dir = 'zeppelin'

    optimization_runs = 200

    class CompilerError(Exception):
        pass

    @classmethod
    def default_contract_dir(cls):
        return cls.__default_contract_dir

    def __init__(self,
                 solc_binary_path: str = None,
                 configuration_path: str = None,
                 chain_name: str = None,
                 source_dirs: List[SourceDirs] = None) -> None:

        self.log = Logger('solidity-compiler')
        # Compiler binary and root solidity source code directory
        self.__sol_binary_path = solc_binary_path if solc_binary_path is not None else self.__default_sol_binary_path
        if source_dirs is None or len(source_dirs) == 0:
            self.source_dirs = [
                SourceDirs(root_source_dir=self.__default_contract_dir)
            ]
        else:
            self.source_dirs = source_dirs

        # JSON config
        self.__configuration_path = configuration_path if configuration_path is not None else self.__default_configuration_path
        self._chain_name = chain_name if chain_name is not None else self.__default_chain_name

        # Set the local env's solidity compiler binary
        os.environ['SOLC_BINARY'] = self.__sol_binary_path

    def install_compiler(self, version: str = None):
        """
        Installs the specified solidity compiler version.
        https://github.com/ethereum/py-solc#installing-the-solc-binary
        """
        version = version if version is not None else self.__default_compiler_version
        return install_solc(
            version, platform=None)  # TODO: #1478 - Implement or remove this

    def compile(self) -> dict:
        interfaces = dict()
        for root_source_dir, other_source_dirs in self.source_dirs:
            if root_source_dir is None:
                self.log.warn("One of the root directories is None")
                continue

            raw_interfaces = self._compile(root_source_dir, other_source_dirs)
            for name, data in raw_interfaces.items():
                # Extract contract version from docs
                version_search = re.search(
                    r"""
                
                \"details\":  # @dev tag in contract docs
                \".*?         # Skip any data in the beginning of details
                \|            # Beginning of version definition |
                (v            # Capture version starting from symbol v
                \d+           # At least one digit of major version
                \.            # Digits splitter
                \d+           # At least one digit of minor version
                \.            # Digits splitter
                \d+           # At least one digit of patch
                )             # End of capturing
                \|            # End of version definition |
                .*?\"         # Skip any data in the end of details
                
                """, data['devdoc'], re.VERBOSE)
                version = version_search.group(
                    1) if version_search else self.__default_contract_version
                try:
                    existence_data = interfaces[name]
                except KeyError:
                    existence_data = dict()
                    interfaces.update({name: existence_data})
                if version not in existence_data:
                    existence_data.update({version: data})
        return interfaces

    def _compile(self, root_source_dir: str, other_source_dirs: [str]) -> dict:
        """Executes the compiler with parameters specified in the json config"""

        self.log.info("Using solidity compiler binary at {}".format(
            self.__sol_binary_path))
        contracts_dir = os.path.join(root_source_dir,
                                     self.__compiled_contracts_dir)
        self.log.info(
            "Compiling solidity source files at {}".format(contracts_dir))

        source_paths = set()
        source_walker = os.walk(top=contracts_dir, topdown=True)
        if other_source_dirs is not None:
            for source_dir in other_source_dirs:
                other_source_walker = os.walk(top=source_dir, topdown=True)
                source_walker = itertools.chain(source_walker,
                                                other_source_walker)

        for root, dirs, files in source_walker:
            for filename in files:
                if filename.endswith('.sol'):
                    path = os.path.join(root, filename)
                    source_paths.add(path)
                    self.log.debug(
                        "Collecting solidity source {}".format(path))

        # Compile with remappings: https://github.com/ethereum/py-solc
        zeppelin_dir = os.path.join(root_source_dir,
                                    self.__zeppelin_library_dir)

        remappings = (
            "contracts={}".format(contracts_dir),
            "zeppelin={}".format(zeppelin_dir),
        )

        self.log.info("Compiling with import remappings {}".format(
            ", ".join(remappings)))

        optimization_runs = self.optimization_runs
        try:
            compiled_sol = compile_files(source_files=source_paths,
                                         import_remappings=remappings,
                                         allow_paths=root_source_dir,
                                         optimize=True,
                                         optimize_runs=optimization_runs)

            self.log.info(
                "Successfully compiled {} contracts with {} optimization runs".
                format(len(compiled_sol), optimization_runs))

        except FileNotFoundError:
            raise RuntimeError(
                "The solidity compiler is not at the specified path. "
                "Check that the file exists and is executable.")
        except PermissionError:
            raise RuntimeError(
                "The solidity compiler binary at {} is not executable. "
                "Check the file's permissions.".format(self.__sol_binary_path))

        except SolcError:
            raise

        # Cleanup the compiled data keys
        interfaces = {
            name.split(':')[-1]: compiled_sol[name]
            for name in compiled_sol
        }
        return interfaces
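A hypothetical usage sketch (the contract name shown is illustrative, and the defaults assume a bundled source tree plus a `solc` binary on the PATH):

compiler = SolidityCompiler()
interfaces = compiler.compile()
# The returned mapping is keyed by contract name, then by the version string
# parsed from the "|vX.Y.Z|" marker in the contract's @dev docs:
abi = interfaces['NuCypherToken']['v0.0.0']['abi']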
Example #45
class IRCd(Service):
	def __init__(self, configFileName):
		self.config = Config(self, configFileName)
		
		self.boundPorts = {}
		self.loadedModules = {}
		self._loadedModuleData = {}
		self._unloadingModules = {}
		self.commonModules = set()
		self.userCommands = {}
		self.serverCommands = {}
		self.channelModes = ({}, {}, {}, {})
		self.channelStatuses = {}
		self.channelStatusSymbols = {}
		self.channelStatusOrder = []
		self.channelModeTypes = {}
		self.userModes = ({}, {}, {}, {})
		self.userModeTypes = {}
		self.actions = {}
		self.storage = None
		self.storageSyncer = None
		self.dataCache = {}
		self.functionCache = {}
		
		self.serverID = None
		self.name = None
		self.isupport_tokens = {
			"CASEMAPPING": "strict-rfc1459",
			"CHANTYPES": "#",
		}
		self._uid = self._genUID()
		
		self.users = {}
		self.userNicks = CaseInsensitiveDictionary()
		self.channels = CaseInsensitiveDictionary(WeakValueDictionary)
		self.servers = {}
		self.serverNames = CaseInsensitiveDictionary()
		self.recentlyQuitUsers = {}
		self.recentlyQuitServers = {}
		self.recentlyDestroyedChannels = CaseInsensitiveDictionary()
		self.pruneRecentlyQuit = None
		self.pruneRecentChannels = None
		
		self._logFilter = LogLevelFilterPredicate()
		filterObserver = FilteringLogObserver(globalLogPublisher, (self._logFilter,))
		self.log = Logger("txircd", observer=filterObserver)
		
		self.startupTime = None
	
	def startService(self):
		self.log.info("Starting up...")
		self.startupTime = now()
		self.log.info("Loading configuration...")
		self.config.reload()
		self.name = self.config["server_name"]
		self.serverID = self.config["server_id"]
		self.log.info("Loading storage...")
		self.storage = shelve.open(self.config["datastore_path"], writeback=True)
		self.storageSyncer = LoopingCall(self.storage.sync)
		self.storageSyncer.start(self.config.get("storage_sync_interval", 5), now=False)
		self.log.info("Starting processes...")
		self.pruneRecentlyQuit = LoopingCall(self.pruneQuit)
		self.pruneRecentlyQuit.start(10, now=False)
		self.pruneRecentChannels = LoopingCall(self.pruneChannels)
		self.pruneRecentChannels.start(15, now=False)
		self.log.info("Loading modules...")
		self._loadModules()
		self.log.info("Binding ports...")
		self._bindPorts()
		self.log.info("txircd started!")
		try:
			self._logFilter.setLogLevelForNamespace("txircd", LogLevel.levelWithName(self.config["log_level"]))
		except (KeyError, InvalidLogLevelError):
			self._logFilter.setLogLevelForNamespace("txircd", LogLevel.warn)
		self.runActionStandard("startup")
	
	def stopService(self):
		stopDeferreds = []
		self.log.info("Disconnecting servers...")
		serverList = self.servers.values() # Take the list of server objects
		self.servers = {} # And then destroy the server dict to inhibit server objects generating lots of noise
		for server in serverList:
			if server.nextClosest == self.serverID:
				stopDeferreds.append(server.disconnectedDeferred)
				allUsers = self.users.keys()
				for user in allUsers:
					if user[:3] == server.serverID:
						del self.users[user]
				server.transport.loseConnection()
		self.log.info("Disconnecting users...")
		userList = self.users.values() # Basically do the same thing I just did with the servers
		self.users = {}
		for user in userList:
			if user.transport:
				stopDeferreds.append(user.disconnectedDeferred)
				user.transport.loseConnection()
		self.log.info("Unloading modules...")
		moduleList = self.loadedModules.keys()
		for module in moduleList:
			self._unloadModule(module, False) # Incomplete unload is done to save time and because side effects are destroyed anyway
		self.log.info("Stopping processes...")
		if self.pruneRecentlyQuit.running:
			self.pruneRecentlyQuit.stop()
		if self.pruneRecentChannels.running:
			self.pruneRecentChannels.stop()
		self.log.info("Closing data storage...")
		if self.storageSyncer.running:
			self.storageSyncer.stop()
		self.storage.close() # a close() will sync() also
		self.log.info("Releasing ports...")
		stopDeferreds.extend(self._unbindPorts())
		return DeferredList(stopDeferreds)
	
	def _loadModules(self):
		for module in getPlugins(IModuleData, txircd.modules):
			if module.name in self.loadedModules:
				continue
			if module.core or module.name in self.config["modules"]:
				self._loadModuleData(module)
		for moduleName in self.config["modules"]:
			if moduleName not in self.loadedModules:
				self.log.warn("The module {module} failed to load.", module=moduleName)
	
	def loadModule(self, moduleName):
		"""
		Loads a module of the specified name.
		Raises ModuleLoadError if the module cannot be loaded.
		If the specified module is currently being unloaded, returns the
		DeferredList specified by the module when it was unloading with a
		callback to try to load the module again once it succeeds.
		"""
		if moduleName in self._unloadingModules:
			deferList = self._unloadingModules[moduleName]
			deferList.addCallback(self._tryLoadAgain, moduleName)
			return deferList
		for module in getPlugins(IModuleData, txircd.modules):
			if module.name == moduleName:
				rebuild(importlib.import_module(module.__module__)) # getPlugins doesn't recompile modules, so let's do that ourselves.
				self._loadModuleData(module)
				self.log.info("Loaded module {module}.", module=moduleName)
				break
	
	def _tryLoadAgain(self, _, moduleName):
		self.loadModule(moduleName)
	
	def _loadModuleData(self, module):
		if not IModuleData.providedBy(module):
			raise ModuleLoadError ("???", "Module does not implement module interface")
		if not module.name:
			raise ModuleLoadError ("???", "Module did not provide a name")
		if module.name in self.loadedModules:
			self.log.debug("Not loading {module.name} because it's already loaded", module=module)
			return
		
		self.log.debug("Beginning to load {module.name}...", module=module)
		module.hookIRCd(self)
		try:
			module.verifyConfig(self.config)
		except ConfigError as e:
			raise ModuleLoadError(module.name, e)
		
		self.log.debug("Loading hooks from {module.name}...", module=module)
		moduleData = {
			"channelmodes": module.channelModes(),
			"usermodes": module.userModes(),
			"actions": module.actions(),
			"usercommands": module.userCommands(),
			"servercommands": module.serverCommands()
		}
		newChannelModes = ({}, {}, {}, {})
		newChannelStatuses = {}
		newUserModes = ({}, {}, {}, {})
		newActions = {}
		newUserCommands = {}
		newServerCommands = {}
		common = False
		self.log.debug("Processing hook data from {module.name}...", module=module)
		for mode in moduleData["channelmodes"]:
			if mode[0] in self.channelModeTypes:
				raise ModuleLoadError (module.name, "Tries to implement channel mode +{} when that mode is already implemented.".format(mode[0]))
			if not IMode.providedBy(mode[2]):
				raise ModuleLoadError (module.name, "Returns a channel mode object (+{}) that doesn't implement IMode.".format(mode[0]))
			if mode[1] == ModeType.Status:
				if mode[4] in self.channelStatusSymbols:
					raise ModuleLoadError (module.name, "Tries to create a channel rank with symbol {} when that symbol is already in use.".format(mode[4]))
				try:
					newChannelStatuses[mode[0]] = (mode[4], mode[3], mode[2])
				except IndexError:
					raise ModuleLoadError (module.name, "Specifies channel status mode {} without a rank or symbol".format(mode[0]))
			else:
				newChannelModes[mode[1]][mode[0]] = mode[2]
			common = True
		for mode in moduleData["usermodes"]:
			if mode[0] in self.userModeTypes:
				raise ModuleLoadError (module.name, "Tries to implement user mode +{} when that mode is already implemented.".format(mode[0]))
			if not IMode.providedBy(mode[2]):
				raise ModuleLoadError (module.name, "Returns a user mode object (+{}) that doesn't implement IMode.".format(mode[0]))
			newUserModes[mode[1]][mode[0]] = mode[2]
			common = True
		for action in moduleData["actions"]:
			if action[0] not in newActions:
				newActions[action[0]] = [(action[2], action[1])]
			else:
				newActions[action[0]].append((action[2], action[1]))
		for command in moduleData["usercommands"]:
			if not ICommand.providedBy(command[2]):
				raise ModuleLoadError (module.name, "Returns a user command object ({}) that doesn't implement ICommand.".format(command[0]))
			if command[0] not in newUserCommands:
				newUserCommands[command[0]] = []
			newUserCommands[command[0]].append((command[2], command[1]))
		for command in moduleData["servercommands"]:
			if not ICommand.providedBy(command[2]):
				raise ModuleLoadError (module.name, "Returns a server command object ({}) that doesnt implement ICommand.".format(command[0]))
			if command[0] not in newServerCommands:
				newServerCommands[command[0]] = []
			newServerCommands[command[0]].append((command[2], command[1]))
			common = True
		if not common:
			common = module.requiredOnAllServers

		self.log.debug("Loaded data from {module.name}; committing data and calling hooks...", module=module)
		
		module.load()
		
		self.loadedModules[module.name] = module
		self._loadedModuleData[module.name] = moduleData
		if common:
			self.commonModules.add(module.name)
		
		self.runActionStandard("moduleload", module.name)
		
		for modeType, typeSet in enumerate(newChannelModes):
			for mode, implementation in typeSet.iteritems():
				self.channelModeTypes[mode] = modeType
				self.channelModes[modeType][mode] = implementation
		for mode, data in newChannelStatuses.iteritems():
			self.channelModeTypes[mode] = ModeType.Status
			self.channelStatuses[mode] = data
			self.channelStatusSymbols[data[0]] = mode
			for index, status in enumerate(self.channelStatusOrder):
				if self.channelStatuses[status][1] < data[1]:
					self.channelStatusOrder.insert(index, mode)
					break
			else:
				self.channelStatusOrder.append(mode)
		for modeType, typeSet in enumerate(newUserModes):
			for mode, implementation in typeSet.iteritems():
				self.userModeTypes[mode] = modeType
				self.userModes[modeType][mode] = implementation
		for action, actionList in newActions.iteritems():
			if action not in self.actions:
				self.actions[action] = []
			for actionData in actionList:
				for index, handlerData in enumerate(self.actions[action]):
					if handlerData[1] < actionData[1]:
						self.actions[action].insert(index, actionData)
						break
				else:
					self.actions[action].append(actionData)
		for command, dataList in newUserCommands.iteritems():
			if command not in self.userCommands:
				self.userCommands[command] = []
			for data in dataList:
				for index, cmd in enumerate(self.userCommands[command]):
					if cmd[1] < data[1]:
						self.userCommands[command].insert(index, data)
						break
				else:
					self.userCommands[command].append(data)
		for command, dataList in newServerCommands.iteritems():
			if command not in self.serverCommands:
				self.serverCommands[command] = []
			for data in dataList:
				for index, cmd in enumerate(self.serverCommands[command]):
					if cmd[1] < data[1]:
						self.serverCommands[command].insert(index, data)
						break
				else:
					self.serverCommands[command].append(data)
		
		self.log.debug("Module {module.name} is now fully loaded.", module=module)
	
	def unloadModule(self, moduleName):
		"""
		Unloads the loaded module with the given name. Raises ValueError
		if the module cannot be unloaded because it's a core module.
		"""
		self._unloadModule(moduleName, True)
		self.log.info("Unloaded module {module}.", module=moduleName)
	
	def _unloadModule(self, moduleName, fullUnload):
		unloadDeferreds = []
		if moduleName not in self.loadedModules:
			return
		module = self.loadedModules[moduleName]
		if fullUnload and module.core:
			raise ValueError ("The module you're trying to unload is a core module.")
		moduleData = self._loadedModuleData[moduleName]
		d = module.unload()
		if d is not None:
			unloadDeferreds.append(d)
		
		if fullUnload:
			d = module.fullUnload()
			if d is not None:
				unloadDeferreds.append(d)
		
		for modeData in moduleData["channelmodes"]:
			if fullUnload: # Unset modes on full unload
				if modeData[1] == ModeType.Status:
					for channel in self.channels.itervalues():
						removeFromChannel = []
						for user, userData in channel.user.iteritems():
							if modeData[0] in userData["status"]:
								removeFromChannel.append((False, modeData[0], user.uuid))
						channel.setModes(removeFromChannel, self.serverID)
				elif modeData[1] == ModeType.List:
					for channel in self.channels.itervalues():
						if modeData[0] in channel.modes:
							removeFromChannel = []
							for paramData in channel.modes[modeData[0]]:
								removeFromChannel.append((False, modeData[0], paramData[0]))
							channel.setModes(removeFromChannel, self.serverID)
				else:
					for channel in self.channels.itervalues():
						if modeData[0] in channel.modes:
							channel.setModes([(False, modeData[0], channel.modes[modeData[0]])], self.serverID)
			
			if modeData[1] == ModeType.Status:
				del self.channelStatuses[modeData[0]]
				del self.channelStatusSymbols[modeData[4]]
				self.channelStatusOrder.remove(modeData[0])
			else:
				del self.channelModes[modeData[1]][modeData[0]]
			del self.channelModeTypes[modeData[0]]
		for modeData in moduleData["usermodes"]:
			if fullUnload: # Unset modes on full unload
				if modeData[1] == ModeType.List:
					for user in self.users.itervalues():
						if modeData[0] in user.modes:
							removeFromUser = []
							for paramData in user.modes[modeData[0]]:
								removeFromUser.append((False, modeData[0], paramData[0]))
							user.setModes(removeFromUser, self.serverID)
				else:
					for user in self.users.itervalues():
						if modeData[0] in user.modes:
							user.setModes([(False, modeData[0], user.modes[modeData[0]])], self.serverID)
			
			del self.userModes[modeData[1]][modeData[0]]
			del self.userModeTypes[modeData[0]]
		for actionData in moduleData["actions"]:
			self.actions[actionData[0]].remove((actionData[2], actionData[1]))
			if not self.actions[actionData[0]]:
				del self.actions[actionData[0]]
		for commandData in moduleData["usercommands"]:
			self.userCommands[commandData[0]].remove((commandData[2], commandData[1]))
			if not self.userCommands[commandData[0]]:
				del self.userCommands[commandData[0]]
		for commandData in moduleData["servercommands"]:
			self.serverCommands[commandData[0]].remove((commandData[2], commandData[1]))
			if not self.serverCommands[commandData[0]]:
				del self.serverCommands[commandData[0]]
		
		del self.loadedModules[moduleName]
		del self._loadedModuleData[moduleName]
		
		if fullUnload:
			self.runActionStandard("moduleunload", module.name)
		
		if unloadDeferreds:
			deferList = DeferredList(unloadDeferreds)
			self._unloadingModules[moduleName] = deferList
			deferList.addCallback(self._removeFromUnloadingList, moduleName)
			return deferList
	
	def _removeFromUnloadingList(self, _, moduleName):
		del self._unloadingModules[moduleName]
	
	def reloadModule(self, moduleName):
		"""
		Reloads the module with the given name.
		Returns a DeferredList if the module unloads with one or more Deferreds.
		May raise ModuleLoadError if the module cannot be loaded.
		"""
		deferList = self._unloadModule(moduleName, False)
		if deferList is None:
			deferList = self.loadModule(moduleName)
		else:
			deferList.addCallback(lambda result: self.loadModule(moduleName))
		return deferList

	def verifyConfig(self, config):
		# IRCd
		if "server_name" not in config:
			raise ConfigValidationError("server_name", "required item not found in configuration file.")
		if not isinstance(config["server_name"], basestring):
			raise ConfigValidationError("server_name", "value must be a string")
		if len(config["server_name"]) > 64:
			config["server_name"] = config["server_name"][:64]
			self.logConfigValidationWarning("server_name", "value is too long and has been truncated", config["server_name"])
		if not re.match(r"^[a-zA-Z0-9.-]+\.[a-zA-Z0-9.-]+$", config["server_name"]):
			raise ConfigValidationError("server_name", "server name must look like a valid hostname.")
		if "server_id" in config:
			if not isinstance(config["server_id"], basestring):
				raise ConfigValidationError("server_id", "value must be a string")
			else:
				config["server_id"] = config["server_id"].upper()
		else:
			randFromName = random.Random(config["server_name"])
			serverID = randFromName.choice(string.digits) + randFromName.choice(string.digits + string.ascii_uppercase) + randFromName.choice(string.digits + string.ascii_uppercase)
			config["server_id"] = serverID
		if len(config["server_id"]) != 3 or not config["server_id"].isalnum() or not config["server_id"][0].isdigit():
			raise ConfigValidationError("server_id", "value must be a 3-character alphanumeric string starting with a number.")
		if "server_description" not in config:
			raise ConfigValidationError("server_description", "required item not found in configuration file.")
		if not isinstance(config["server_description"], basestring):
			raise ConfigValidationError("server_description", "value must be a string")
		if not config["server_description"]:
			raise ConfigValidationError("server_description", "value must not be an empty string")
		if len(config["server_description"]) > 255:
			config["server_description"] = config["server_description"][:255]
			self.logConfigValidationWarning("server_description", "value is too long and has been truncated", config["server_description"])
		if "network_name" not in config:
			raise ConfigValidationError("network_name", "required item not found in configuration file.")
		if not isinstance(config["network_name"], basestring):
			raise ConfigValidationError("network_name", "value must be a string")
		if not config["network_name"]:
			raise ConfigValidationError("network_name", "value must not be an empty string")
		if " " in config["network_name"]:
			raise ConfigValidationError("network_name", "value cannot have spaces")
		if len(config["network_name"]) > 32:
			config["network_name"] = config["network_name"][:32]
			self.logConfigValidationWarning("network_name", "value is too long", config["network_name"])
		if "bind_client" not in config:
			config["bind_client"] = [ "tcp:6667:interface={::}" ]
			self.logConfigValidationWarning("bind_client", "no default client binding specified", "[ \"tcp:6667:interface={::}\" ]")
		if not isinstance(config["bind_client"], list):
			raise ConfigValidationError("bind_client", "value must be a list")
		for bindDesc in config["bind_client"]:
			if not isinstance(bindDesc, basestring):
				raise ConfigValidationError("bind_client", "every entry must be a string")
		if "bind_server" not in config:
			config["bind_server"] = []
		if not isinstance(config["bind_server"], list):
			raise ConfigValidationError("bind_server", "value must be a list")
		for bindDesc in config["bind_server"]:
			if not isinstance(bindDesc, basestring):
				raise ConfigValidationError("bind_server", "every entry must be a string")
		if "modules" not in config:
			config["modules"] = []
		if not isinstance(config["modules"], list):
			raise ConfigValidationError("modules", "value must be a list")
		for module in config["modules"]:
			if not isinstance(module, basestring):
				raise ConfigValidationError("modules", "every entry must be a string")
		if "links" in config:
			if not isinstance(config["links"], dict):
				raise ConfigValidationError("links", "value must be a dictionary")
			for desc, server in config["links"].iteritems():
				if not isinstance(desc, basestring):
					raise ConfigValidationError("links", "\"{}\" is an invalid server description".format(desc))
				if not isinstance(server, dict):
					raise ConfigValidationError("links", "values for \"{}\" must be a dictionary".format(desc))
				if "connect_descriptor" not in server:
					raise ConfigValidationError("links", "server \"{}\" must contain a \"connect_descriptor\" value".format(desc))
				if "in_password" in server:
					if not isinstance(server["in_password"], basestring):
						config["links"][desc]["in_password"] = str(server["in_password"])
				if "out_password" in server:
					if not isinstance(server["out_password"], basestring):
						config["links"][desc]["out_password"] = str(server["out_password"])
		if "datastore_path" not in config:
			config["datastore_path"] = "data.db"
		if "storage_sync_interval" in config and not isinstance(config["storage_sync_interval"], int):
			raise ConfigValidationError(config["storage_sync_interval"], "invalid number")

		# Channels
		if "channel_name_length" in config:
			if not isinstance(config["channel_name_length"], int) or config["channel_name_length"] < 0:
				raise ConfigValidationError("channel_name_length", "invalid number")
			elif config["channel_name_length"] > 64:
				config["channel_name_length"] = 64
				self.logConfigValidationWarning("channel_name_length", "value is too large", 64)
		if "modes_per_line" in config:
			if not isinstance(config["modes_per_line"], int) or config["modes_per_line"] < 0:
				raise ConfigValidationError("modes_per_line", "invalid number")
			elif config["modes_per_line"] > 20:
				config["modes_per_line"] = 20
				self.logConfigValidationWarning("modes_per_line", "value is too large", 20)
		if "channel_listmode_limit" in config:
			if not isinstance(config["channel_listmode_limit"], int) or config["channel_listmode_limit"] < 0:
				raise ConfigValidationError("channel_listmode_limit", "invalid number")
			if config["channel_listmode_limit"] > 256:
				config["channel_listmode_limit"] = 256
				self.logConfigValidationWarning("channel_listmode_limit", "value is too large", 256)

		# Users
		if "user_registration_timeout" in config:
			if not isinstance(config["user_registration_timeout"], int) or config["user_registration_timeout"] < 0:
				raise ConfigValidationError("user_registration_timeout", "invalid number")
			elif config["user_registration_timeout"] < 10:
				config["user_registration_timeout"] = 10
				self.logConfigValidationWarning("user_registration_timeout", "timeout could be too short for clients to register in time", 10)
		if "user_ping_frequency" in config and (not isinstance(config["user_ping_frequency"], int) or config["user_ping_frequency"] < 0):
			raise ConfigValidationError("user_ping_frequency", "invalid number")
		if "hostname_length" in config:
			if not isinstance(config["hostname_length"], int) or config["hostname_length"] < 0:
				raise ConfigValidationError("hostname_length", "invalid number")
			elif config["hostname_length"] > 64:
				config["hostname_length"] = 64
				self.logConfigValidationWarning("hostname_length", "value is too large", 64)
			elif config["hostname_length"] < 4:
				config["hostname_length"] = 4
				self.logConfigValidationWarning("hostname_length", "value is too small", 4)
		if "ident_length" in config:
			if not isinstance(config["ident_length"], int) or config["ident_length"] < 0:
				raise ConfigValidationError("ident_length", "invalid number")
			elif config["ident_length"] > 12:
				config["ident_length"] = 12
				self.logConfigValidationWarning("ident_length", "value is too large", 12)
			elif config["ident_length"] < 1:
				config["ident_length"] = 1
				self.logConfigValidationWarning("ident_length", "value is too small", 1)
		if "gecos_length" in config:
			if not isinstance(config["gecos_length"], int) or config["gecos_length"] < 0:
				raise ConfigValidationError("gecos_length", "invalid number")
			elif config["gecos_length"] > 128:
				config["gecos_length"] = 128
				self.logConfigValidationWarning("gecos_length", "value is too large", 128)
			elif config["gecos_length"] < 1:
				config["gecos_length"] = 1
				self.logConfigValidationWarning("gecos_length", "value is too small", 1)
		if "user_listmode_limit" in config:
			if not isinstance(config["user_listmode_limit"], int) or config["user_listmode_limit"] < 0:
				raise ConfigValidationError("user_listmode_limit", "invalid number")
			if config["user_listmode_limit"] > 256:
				config["user_listmode_limit"] = 256
				self.logConfigValidationWarning("user_listmode_limit", "value is too large", 256)

		# Servers
		if "server_registration_timeout" in config:
			if not isinstance(config["server_registration_timeout"], int) or config["server_registration_timeout"] < 0:
				raise ConfigValidationError("server_registration_timeout", "invalid number")
			elif config["server_registration_timeout"] < 10:
				config["server_registration_timeout"] = 10
				self.logConfigValidationWarning("server_registration_timeout", "timeout could be too short for servers to register in time", 10)
		if "server_ping_frequency" in config and (not isinstance(config["server_ping_frequency"], int) or config["server_ping_frequency"] < 0):
			raise ConfigValidationError("server_ping_frequency", "invalid number")

		for module in self.loadedModules.itervalues():
			module.verifyConfig(config)

	def logConfigValidationWarning(self, key, message, default):
		self.log.warn("Config value \"{configKey}\" is invalid ({message}); the value has been set to a default of \"{default}\".", configKey=key, message=message, default=default)

	def rehash(self):
		"""
		Reloads the configuration file and applies changes.
		"""
		self.log.info("Rehashing...")
		self.config.reload()
		d = self._unbindPorts() # Unbind the ports that are bound
		if d: # And then bind the new ones
			DeferredList(d).addCallback(lambda result: self._bindPorts())
		else:
			self._bindPorts()
		
		try:
			self._logFilter.setLogLevelForNamespace("txircd", LogLevel.levelWithName(self.config["log_level"]))
		except (KeyError, InvalidLogLevelError):
			pass # If we can't set a new log level, we'll keep the old one
		
		for module in self.loadedModules.itervalues():
			module.rehash()
	
	def _bindPorts(self):
		for bindDesc in self.config["bind_client"]:
			try:
				endpoint = serverFromString(reactor, unescapeEndpointDescription(bindDesc))
			except ValueError as e:
				self.log.error(e)
				continue
			listenDeferred = endpoint.listen(UserFactory(self))
			listenDeferred.addCallback(self._savePort, bindDesc, "client")
			listenDeferred.addErrback(self._logNotBound, bindDesc)
		for bindDesc in self.config["bind_server"]:
			try:
				endpoint = serverFromString(reactor, unescapeEndpointDescription(bindDesc))
			except ValueError as e:
				self.log.error(e)
				continue
			listenDeferred = endpoint.listen(ServerListenFactory(self))
			listenDeferred.addCallback(self._savePort, bindDesc, "server")
			listenDeferred.addErrback(self._logNotBound, bindDesc)
	
	def _unbindPorts(self):
		deferreds = []
		for port in self.boundPorts.itervalues():
			d = port.stopListening()
			if d:
				deferreds.append(d)
		return deferreds
	
	def _savePort(self, port, desc, portType):
		self.boundPorts[desc] = port
		self.log.debug("Bound endpoint '{endpointDescription}' for {portType} connections.", endpointDescription=desc, portType=portType)
	
	def _logNotBound(self, err, desc):
		self.log.error("Could not bind '{endpointDescription}': {errorMsg}", endpointDescription=desc, errorMsg=err)
	
	def createUUID(self):
		"""
		Gets the next UUID for a new client.
		"""
		newUUID = self.serverID + self._uid.next()
		# It'll take over 1.5 billion connections to loop around, but we still
		# want to be extra safe and avoid collisions.
		while newUUID in self.users:
			newUUID = self.serverID + self._uid.next()
		self.log.debug("Generated new UUID {uuid}", uuid=newUUID)
		return newUUID
	
	def _genUID(self):
		uid = "AAAAAA"
		while True:
			yield uid
			uid = self._incrementUID(uid)
	
	def _incrementUID(self, uid):
		if uid == "Z": # The first character must be a letter
			return "A" # So wrap that around
		if uid[-1] == "9":
			return self._incrementUID(uid[:-1]) + "A"
		if uid[-1] == "Z":
			return uid[:-1] + "0"
		return uid[:-1] + chr(ord(uid[-1]) + 1)
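	# A hypothetical walk of the UID sequence under the scheme above:
	#   "AAAAAA" -> "AAAAAB" ... "AAAAAZ" -> "AAAAA0" ... "AAAAA9" -> "AAAABA"
	# The first character stays a letter, so the space holds 26 * 36**5
	# (just over 1.5 billion) UIDs before wrapping back around to "AAAAAA".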
	
	def pruneQuit(self):
		compareTime = now() - timedelta(seconds=10)
		remove = []
		for uuid, timeQuit in self.recentlyQuitUsers.iteritems():
			if timeQuit < compareTime:
				remove.append(uuid)
		for uuid in remove:
			del self.recentlyQuitUsers[uuid]
		
		remove = []
		for serverID, timeQuit in self.recentlyQuitServers.iteritems():
			if timeQuit < compareTime:
				remove.append(serverID)
		for serverID in remove:
			del self.recentlyQuitServers[serverID]
	
	def pruneChannels(self):
		removeChannels = []
		for channel, remove in self.recentlyDestroyedChannels.iteritems():
			if remove:
				removeChannels.append(channel)
			elif channel not in self.channels:
				self.recentlyDestroyedChannels[channel] = True
		for channel in removeChannels:
			del self.recentlyDestroyedChannels[channel]
	
	def generateISupportList(self):
		isupport = self.isupport_tokens.copy()
		statusSymbolOrder = "".join([self.channelStatuses[status][0] for status in self.channelStatusOrder])
		isupport["CHANMODES"] = ",".join(["".join(modes) for modes in self.channelModes])
		isupport["CHANNELLEN"] = self.config.get("channel_name_length", 64)
		isupport["NETWORK"] = self.config["network_name"]
		isupport["PREFIX"] = "({}){}".format("".join(self.channelStatusOrder), statusSymbolOrder)
		isupport["STATUSMSG"] = statusSymbolOrder
		isupport["USERMODES"] = ",".join(["".join(modes) for modes in self.userModes])
		self.runActionStandard("buildisupport", isupport)
		isupportList = []
		for key, val in isupport.iteritems():
			if val is None:
				isupportList.append(key)
			else:
				isupportList.append("{}={}".format(key, val))
		return isupportList
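	# For illustration (hypothetical network values), the resulting list holds
	# entries like "CHANNELLEN=64" or "NETWORK=ExampleNet", plus bare tokens
	# for keys whose value is None.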
	
	def connectServer(self, name):
		"""
		Connect a server with the given name in the configuration.
		Returns a Deferred for the connection when we can successfully connect
		or None if the server is already connected or if we're unable to find
		information for that server in the configuration.
		"""
		if name in self.serverNames:
			return None
		if name not in self.config.get("links", {}):
			return None
		serverConfig = self.config["links"][name]
		endpoint = clientFromString(reactor, unescapeEndpointDescription(serverConfig["connect_descriptor"]))
		d = endpoint.connect(ServerConnectFactory(self))
		d.addCallback(self._completeServerConnection, name)
		return d
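	# Hypothetical usage, assuming a "links" entry named "hub.example.com":
	#   d = self.connectServer("hub.example.com")
	#   if d:
	#       d.addErrback(lambda err: self.log.error("Connection failed: {error}", error=err))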
	
	def _completeServerConnection(self, result, name):
		self.log.info("Connected to server {serverName}", serverName=name)
		self.runActionStandard("initiateserverconnection", result)
	
	def broadcastToServers(self, fromServer, command, *params, **kw):
		"""
		Broadcasts a message to all connected servers. The fromServer parameter
		should be the server from which the message came; if this server is the
		originating server, specify None for fromServer.
		"""
		for server in self.servers.itervalues():
			if server.nextClosest == self.serverID and server != fromServer:
				server.sendMessage(command, *params, **kw)
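	# Hypothetical call, originating locally (hence fromServer=None):
	#   self.broadcastToServers(None, "PING", self.serverID)
	# Only directly-connected servers (nextClosest == self.serverID) are
	# written to; they relay the message onward themselves.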
	
	def _getActionModes(self, actionName, *params, **kw):
		users = []
		channels = []
		if "users" in kw:
			users = kw["users"]
		if "channels" in kw:
			channels = kw["channels"]
		
		functionList = []
		
		if users:
			genericUserActionName = "modeactioncheck-user-{}".format(actionName)
			genericUserActionNameWithChannel = "modeactioncheck-user-withchannel-{}".format(actionName)
			for modeType in self.userModes:
				for mode, modeObj in modeType.iteritems():
					if actionName not in modeObj.affectedActions:
						continue
					priority = modeObj.affectedActions[actionName]
					actionList = []
					# Because Python doesn't properly capture variables in lambdas, we have to force static capture
					# by wrapping lambdas in more lambdas.
					# I wish Python wasn't this gross.
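					# For instance, [lambda: m for m in "abc"] yields three closures that
					# all return "c", while [(lambda m: lambda: m)(m) for m in "abc"]
					# captures "a", "b" and "c" at definition time.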
					for action in self.actions.get("modeactioncheck-user", []):
						actionList.append(((lambda action, actionName, mode: lambda user, *params: action[0](actionName, mode, user, *params))(action, actionName, mode), action[1]))
					for action in self.actions.get("modeactioncheck-user-withchannel", []):
						for channel in channels:
							actionList.append(((lambda action, actionName, mode, channel: lambda user, *params: action[0](actionName, mode, user, channel, *params))(action, actionName, mode, channel), action[1]))
					for action in self.actions.get(genericUserActionName, []):
						actionList.append(((lambda action, mode: lambda user, *params: action[0](mode, user, *params))(action, mode), action[1]))
					for action in self.actions.get(genericUserActionNameWithChannel, []):
						for channel in channels:
							actionList.append(((lambda action, mode, channel: lambda user, *params: action[0](mode, user, channel, *params))(action, mode, channel), action[1]))
					modeUserActionName = "modeactioncheck-user-{}-{}".format(mode, actionName)
					modeUserActionNameWithChannel = "modeactioncheck-user-withchannel-{}-{}".format(mode, actionName)
					for action in self.actions.get(modeUserActionNameWithChannel, []):
						for channel in channels:
							actionList.append(((lambda action, channel: lambda user, *params: action[0](user, channel, *params))(action, channel), action[1]))
					actionList = sorted(self.actions.get(modeUserActionName, []) + actionList, key=lambda action: action[1], reverse=True)
					applyUsers = []
					for user in users:
						for action in actionList:
							param = action[0](user, *params)
							if param is not None:
								if param is not False:
									applyUsers.append((user, param))
								break
					for user, param in applyUsers:
						functionList.append(((lambda modeObj, actionName, user, param: lambda *params: modeObj.apply(actionName, user, param, *params))(modeObj, actionName, user, param), priority))
		
		if channels:
			genericChannelActionName = "modeactioncheck-channel-{}".format(actionName)
			genericChannelActionNameWithUser = "******".format(actionName)
			for modeType in self.channelModes:
				for mode, modeObj in modeType.iteritems():
					if actionName not in modeObj.affectedActions:
						continue
					priority = modeObj.affectedActions[actionName]
					actionList = []
					for action in self.actions.get("modeactioncheck-channel", []):
						actionList.append(((lambda action, actionName, mode: lambda channel, *params: action[0](actionName, mode, channel, *params))(action, actionName, mode), action[1]))
					for action in self.actions.get("modeactioncheck-channel-withuser", []):
						for user in users:
							actionList.append(((lambda action, actionName, mode, user: lambda channel, *params: action[0](actionName, mode, channel, user, *params))(action, actionName, mode, user), action[1]))
					for action in self.actions.get(genericChannelActionName, []):
						actionList.append(((lambda action, mode: lambda channel, *params: action[0](mode, channel, *params))(action, mode), action[1]))
					for action in self.actions.get(genericChannelActionNameWithUser, []):
						for user in users:
							actionList.append(((lambda action, mode, user: lambda channel, *params: action[0](mode, channel, user, *params))(action, mode, user), action[1]))
					modeChannelActionName = "modeactioncheck-channel-{}-{}".format(mode, actionName)
					modeChannelActionNameWithUser = "******".format(mode, actionName)
					for action in self.actions.get(modeChannelActionNameWithUser, []):
						for user in users:
							actionList.append(((lambda action, user: lambda channel, *params: action[0](channel, user, *params))(action, user), action[1]))
					actionList = sorted(self.actions.get(modeChannelActionName, []) + actionList, key=lambda action: action[1], reverse=True)
					applyChannels = []
					for channel in channels:
						for action in actionList:
							param = action[0](channel, *params)
							if param is not None:
								if param is not False:
									applyChannels.append((channel, param))
								break
					for channel, param in applyChannels:
						functionList.append(((lambda modeObj, actionName, channel, param: lambda *params: modeObj.apply(actionName, channel, param, *params))(modeObj, actionName, channel, param), priority))
		return functionList
	
	def _getActionFunctionList(self, actionName, *params, **kw):
		functionList = self.actions.get(actionName, [])
		functionList = functionList + self._getActionModes(actionName, *params, **kw)
		return sorted(functionList, key=lambda action: action[1], reverse=True)
	
	def _combineActionFunctionLists(self, actionLists):
		"""
		Combines multiple lists of action functions into one.
		Assumes all lists are sorted.
		Takes a dict mapping action names to their action function lists.
		Returns a list in priority order (highest to lowest) of (actionName, function) tuples.
		"""
		fullActionList = []
		for actionName, actionList in actionLists.iteritems():
			insertPos = 0
			for action in actionList:
				try:
					while fullActionList[insertPos][1] > action[1]:
						insertPos += 1
					fullActionList.insert(insertPos, (actionName, action[0]))
				except IndexError:
					fullActionList.append((actionName, action[0]))
				insertPos += 1
		return fullActionList
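	# Worked example with hypothetical priorities:
	#   {"join": [(f1, 20), (f2, 5)], "part": [(g1, 10)]}
	# combines to [("join", f1), ("part", g1), ("join", f2)].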
	
	def runActionStandard(self, actionName, *params, **kw):
		"""
		Calls all functions for a given action with the given parameters in
		priority order. Accepts the 'users' and 'channels' keyword arguments to
		determine which mode handlers should be included.
		"""
		actionList = self._getActionFunctionList(actionName, *params, **kw)
		for action in actionList:
			action[0](*params)
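	# Hypothetical invocation, assuming modules registered a "welcome" action:
	#   self.runActionStandard("welcome", user, users=[user])
	# fires every registered handler plus matching mode handlers in priority order.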
	
	def runActionUntilTrue(self, actionName, *params, **kw):
		"""
		Calls functions for a given action with the given parameters in
		priority order until one of them returns a true value. Returns True
		when one of the functions returned True. Accepts the 'users' and
		'channels' keyword arguments to determine which mode handlers should be
		included.
		"""
		actionList = self._getActionFunctionList(actionName, *params, **kw)
		for action in actionList:
			if action[0](*params):
				return True
		return False
	
	def runActionUntilFalse(self, actionName, *params, **kw):
		"""
		Calls functions for a given action with the given parameters in
		priority order until one of them returns a false value. Returns True
		when one of the functions returned False. Accepts the 'users' and
		'channels' keyword arguments to determine which mode handlers should be
		included.
		"""
		actionList = self._getActionFunctionList(actionName, *params, **kw)
		for action in actionList:
			if not action[0](*params):
				return True
		return False
	
	def runActionUntilValue(self, actionName, *params, **kw):
		"""
		Calls functions for a given action with the given parameters in
		priority order until one of them returns a non-None value. Returns the
		value returned by the function that returned a non-None value. Accepts
		the 'users' and 'channels' keyword arguments to determine which mode
		handlers should be included.
		"""
		actionList = self._getActionFunctionList(actionName, *params, **kw)
		for action in actionList:
			value = action[0](*params)
			if value is not None:
				return value
		return None
	
	def runActionFlagTrue(self, actionName, *params, **kw):
		"""
		Calls all functions for a given action with the given parameters in
		priority order. Returns True when one of the functions returns a true
		value. Accepts the 'users' and 'channels' keyword arguments to
		determine which mode handlers should be included.
		"""
		oneIsTrue = False
		actionList = self._getActionFunctionList(actionName, *params, **kw)
		for action in actionList:
			if action[0](*params):
				oneIsTrue = True
		return oneIsTrue
	
	def runActionFlagFalse(self, actionName, *params, **kw):
		"""
		Calls all functions for a given action with the given parameters in
		priority order. Returns True when one of the functions returns a false
		value. Accepts the 'users' and 'channels' keyword arguments to
		determine which mode handlers should be included.
		"""
		oneIsFalse = False
		actionList = self._getActionFunctionList(actionName, *params, **kw)
		for action in actionList:
			if action[0](*params):
				oneIsFalse = True
		return oneIsFalse
	
	def runActionProcessing(self, actionName, data, *params, **kw):
		"""
		Calls functions for a given action with the given parameters in
		priority order until the provided data is all processed (the data
		parameter becomes empty). Accepts 'users' and 'channels' keyword
		arguments to determine which mode handlers should be included.
		"""
		actionList = self._getActionFunctionList(actionName, data, *params, **kw)
		for action in actionList:
			action[0](data, *params)
			if not data:
				return
	
	def runActionProcessingMultiple(self, actionName, dataList, *params, **kw):
		"""
		Calls functions for a given action with the given parameters in
		priority order until the provided data is all processed (all of the
		data structures in the dataList parameter become empty). Accepts
		'users' and 'channels' keyword arguments to determine which mode
		handlers should be included.
		"""
		paramList = dataList + params
		actionList = self._getActionFunctionList(actionName, *paramList, **kw)
		for action in actionList:
			action[0](*paramList)
			for data in dataList:
				if data:
					break
			else:
				return
	
	def runComboActionStandard(self, actionList, **kw):
		"""
		Calls all functions for the given actions with the given parameters in
		priority order. Actions are specifed as a list of tuples:
		[ ("action1", param1, param2, ...), ("action2", param1, param2, ...) ]
		Accepts 'users' and 'channels' keyword arguments to determine which
		mode handlers should be included.
		"""
		actionFuncLists = {}
		actionParameters = {}
		for action in actionList:
			parameters = action[1:]
			actionParameters[action[0]] = parameters
			actionFuncLists[action[0]] = self._getActionFunctionList(action[0], *parameters, **kw)
		funcList = self._combineActionFunctionLists(actionFuncLists)
		for actionName, actionFunc in funcList:
			actionFunc(*actionParameters[actionName])
	
	def runComboActionUntilTrue(self, actionList, **kw):
		"""
		Calls functions for the given actions with the given parameters in
		priority order until one of the functions returns a true value. Actions
		are specified as a list of tuples:
		[ ("action1", param1, param2, ...), ("action2", param1, param2, ...) ]
		Returns True if one of the functions returned a true value. Accepts
		'users' and 'channels' keyword arguments to determine which mode
		handlers should be included.
		"""
		actionFuncLists = {}
		actionParameters = {}
		for action in actionList:
			parameters = action[1:]
			actionParameters[action[0]] = parameters
			actionFuncLists[action[0]] = self._getActionFunctionList(action[0], *parameters, **kw)
		funcList = self._combineActionFunctionLists(actionFuncLists)
		for actionName, actionFunc in funcList:
			if actionFunc(*actionParameters[actionName]):
				return True
		return False
	
	def runComboActionUntilFalse(self, actionList, **kw):
		"""
		Calls functions for the given actions with the given parameters in
		priority order until one of the functions returns a false value.
		Actions are specified as a list of tuples:
		[ ("action1", param1, param2, ...), ("action2", param1, param2, ...) ]
		Returns True if one of the functions returned a false value. Accepts
		'users' and 'channels' keyword arguments to determine which mode
		handlers should be included.
		"""
		actionFuncLists = {}
		actionParameters = {}
		for action in actionList:
			parameters = action[1:]
			actionParameters[action[0]] = parameters
			actionFuncLists[action[0]] = self._getActionFunctionList(action[0], *parameters, **kw)
		funcList = self._combineActionFunctionLists(actionFuncLists)
		for actionName, actionFunc in funcList:
			if not actionFunc(*actionParameters[actionName]):
				return True
		return False
	
	def runComboActionUntilValue(self, actionList, **kw):
		"""
		Calls functions for the given actions with the given parameters in
		priority order until one of the functions returns a non-None value.
		Actions are specified as a list of tuples:
		[ ("action1", param1, param2, ...), ("action2", param1, param2, ...) ]
		Returns the value returned by the function that returned a non-None
		value. Accepts 'users' and 'channels' keyword arguments to determine
		which mode handlers should be included.
		"""
		actionFuncLists = {}
		actionParameters = {}
		for action in actionList:
			parameters = action[1:]
			actionParameters[action[0]] = parameters
			actionFuncLists[action[0]] = self._getActionFunctionList(action[0], *parameters, **kw)
		funcList = self._combineActionFunctionLists(actionFuncLists)
		for actionName, actionFunc in funcList:
			value = actionFunc(*actionParameters[actionName])
			if value is not None:
				return value
		return None
	
	def runComboActionFlagTrue(self, actionList, **kw):
		"""
		Calls all functions for the given actions with the given parameters in
		priority order. Actions are specified as a list of tuples:
		[ ("action1", param1, param2, ...), ("action2", param1, param2, ...) ]
		Returns True if any of the functions called returned a true value.
		Accepts 'users' and 'channels' keyword arguments to determine which
		mode handlers should be included.
		"""
		actionFuncLists = {}
		actionParameters = {}
		for action in actionList:
			parameters = action[1:]
			actionParameters[action[0]] = parameters
			actionFuncLists[action[0]] = self._getActionFunctionList(action[0], *parameters, **kw)
		funcList = self._combineActionFunctionLists(actionFuncLists)
		oneIsTrue = False
		for actionName, actionFunc in funcList:
			if actionFunc(*actionParameters[actionName]):
				oneIsTrue = True
		return oneIsTrue
	
	def runComboActionFlagFalse(self, actionList, **kw):
		"""
		Calls all functions for the given actions with the given parameters in
		priority order. Actions are specified as a list of tuples:
		[ ("action1", param1, param2, ...), ("action2", param1, param2, ...) ]
		Returns True if any of the functions called returned a false value.
		Accepts 'users' and 'channels' keyword arguments to determine which
		mode handlers should be included.
		"""
		actionFuncLists = {}
		actionParameters = {}
		for action in actionList:
			parameters = action[1:]
			actionParameters[action[0]] = parameters
			actionFuncLists[action[0]] = self._getActionFunctionList(action[0], *parameters, **kw)
		funcList = self._combineActionFunctionLists(actionFuncLists)
		oneIsFalse = False
		for actionName, actionFunc in funcList:
			if not actionFunc(*actionParameters[actionName]):
				oneIsFalse = True
		return oneIsFalse
	
	def runComboActionProcessing(self, data, actionList, **kw):
		"""
		Calls functions for the given actions with the given parameters in
		priority order until the data given has been processed (the data
		parameter becomes empty). Actions are specified as a list of tuples:
		[ ("action1", param1, param2, ...), ("action2", param1, param2, ...) ]
		Accepts 'users' and 'channels' keyword arguments to determine which
		mode handlers should be included.
		"""
		actionFuncLists = {}
		actionParameters = {}
		for action in actionList:
			parameters = [data] + action[1:]
			actionParameters[action[0]] = parameters
			actionFuncLists[action[0]] = self._getActionFunctionList(action[0], *parameters, **kw)
		funcList = self._combineActionFunctionLists(actionFuncLists)
		for actionName, actionFunc in funcList:
			actionFunc(*actionParameters[actionName])
			if not data:
				break
	
	def runComboActionProcessingMultiple(self, dataList, actionList, **kw):
		"""
		Calls functions for the given actions with the given parameters in
		priority order until the data given has been processed (all the data
		items in the dataList parameter become empty). Actions are specified as
		a list of tuples:
		[ ("action1", param1, param2, ...), ("action2", param1, param2, ...) ]
		Accepts 'users' and 'channels' keyword arguments to determine which
		mode handlers should be included.
		"""
		actionFuncLists = {}
		actionParameters = {}
		for action in actionList:
			parameters = dataList + action[1:]
			actionParameters[action[0]] = parameters
			actionFuncLists[action[0]] = self._getActionFunctionList(action[0], *parameters, **kw)
		funcList = self._combineActionFunctionLists(actionFuncLists)
		for actionName, actionFunc in funcList:
			actionFunc(*actionParameters[actionName])
			for data in dataList:
				if data:
					break
			else:
				return
Example #46
class JSONRPCStdoutEmitter(StdoutEmitter):

    transport_serializer = json.dumps
    delimiter = '\n'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.log = Logger("JSON-RPC-Emitter")

    class JSONRPCError(RuntimeError):
        code = None
        message = "Unknown JSON-RPC Error"

    class ParseError(JSONRPCError):
        code = -32700
        message = "Invalid JSON was received by the server."

    class InvalidRequest(JSONRPCError):
        code = -32600
        message = "The JSON sent is not a valid Request object."

    class MethodNotFound(JSONRPCError):
        code = -32601
        message = "The method does not exist / is not available."

    class InvalidParams(JSONRPCError):
        code = -32602
        message = "Invalid method parameter(s)."

    class InternalError(JSONRPCError):
        code = -32603
        message = "Internal JSON-RPC error."

    @staticmethod
    def assemble_response(response: dict, message_id: int) -> dict:
        response_data = {'jsonrpc': '2.0',
                         'id': str(message_id),
                         'result': response}
        return response_data

    @staticmethod
    def assemble_error(message, code, data=None) -> dict:
        response_data = {'jsonrpc': '2.0',
                         'error': {'code': int(code),  # JSON-RPC 2.0 requires an integer code
                                   'message': str(message),
                                   'data': data},
                         'id': None}  # error has no ID
        return response_data
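    # For illustration, assemble_error("Parse error", -32700) yields:
    #   {'jsonrpc': '2.0',
    #    'error': {'code': -32700, 'message': 'Parse error', 'data': None},
    #    'id': None}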

    def __serialize(self, data: dict, delimiter=delimiter, as_bytes: bool = False) -> Union[str, bytes]:

        # Serialize
        serialized_response = JSONRPCStdoutEmitter.transport_serializer(data)   # type: str

        if as_bytes:
            serialized_response = bytes(serialized_response, encoding='utf-8')  # type: bytes

        # Add delimiter
        if delimiter:
            if as_bytes:
                delimiter = bytes(delimiter, encoding='utf-8')
            serialized_response = delimiter + serialized_response

        return serialized_response
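    # Note the delimiter is prepended rather than appended: __serialize({'a': 1})
    # returns '\n{"a": 1}', so records on the stream are newline-separated.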

    def __write(self, data: dict):
        """Outlet"""

        serialized_response = self.__serialize(data=data)

        # Write to stdout file descriptor
        number_of_written_bytes = self.sink(serialized_response)  # < ------ OUTLET
        return number_of_written_bytes

    def clear(self):
        pass

    def message(self, message: str, **kwds):
        self.log.debug(message)

    def echo(self, *args, **kwds):
        pass

    def banner(self, banner):
        pass

    def ipc(self, response: dict, request_id: int, duration) -> int:
        """
        Write RPC response object to stdout and return the number of bytes written.
        """

        # Serialize JSON RPC Message
        assembled_response = self.assemble_response(response=response, message_id=request_id)
        size = self.__write(data=assembled_response)
        self.log.info(f"OK | Responded to IPC request #{request_id} with {size} bytes, took {duration}")
        return size

    def error(self, e):
        """
        Write RPC error object to stdout and return the number of bytes written.
        """
        try:
            assembled_error = self.assemble_error(message=e.message, code=e.code)
        except AttributeError:
            if not isinstance(e, self.JSONRPCError):
                self.log.info(str(e))
                raise e  # a different error was raised
            else:
                raise self.JSONRPCError

        size = self.__write(data=assembled_error)
        # self.log.info(f"Error {e.code} | {e.message}")  # TODO: Restore this log message
        return size
Example #47
class SmsFactory(ClientFactory, Client):
    room = 'NA'
    actions = ('sendsms', 'readsms')

    def __init__(self, event_fct=None):
        self.protocol = serialLineProtocol()
        self.uid = uuid.uuid4()
        self.protocol.factory = self
        self.log = Logger()
        self.first = True
        self.event = event_fct
        self.callback = None
        self.wait = False
        self.response = []  # buffered response lines, flushed on a final result code
        self.resp_re = re.compile(
            r'^(OK|ERROR|\+CM[ES] ERROR: \d+|COMMAND NOT SUPPORT)$')

    def receive(self, line):
        if self.wait:
            if self.resp_re.match(line):
                self.wait = False
                self.response.append(line)
                if line.startswith('ERROR'):
                    self.log.critical('error from Modem: %s' % line)
                    if self.callback:
                        self.callback.errback(Exception('\n'.join(self.response)))
                else:
                    if self.callback:
                        self.callback.callback(self.response)
                self.response = []
                if self.callback:
                    self.callback = None
            else:
                self.response.append(line)
        elif self.event:
            self.event(line)
        else:
            self.log.debug('unmanaged message from Modem: %s' % line)

    def sendsms(self, recipient, message, callback_fct=None):
        def recipient_set(res):
            self.log.debug(
                'do we have > ? ==> %s' % ('OK' if res == '>' else 'No: ' + res))
            self.callback = defer.Deferred()
            if callback_fct:
                self.callback.addCallback(callback_fct)
            self.wait = True
            self.protocol.send(message + b'\x1a')

        def text_mode(res):
            self.callback = defer.Deferred()
            self.callback.addCallback(recipient_set)
            self.wait = True
            self.protocol.send(b'AT+CMGS="' + recipient.encode() + b'"\r')

        def modem_init(res):
            self.first = False
            self.callback = defer.Deferred()
            self.callback.addCallback(text_mode)
            self.wait = True
            self.protocol.send(b'AT+CMGF=1\r')
        if self.first:
            self.wait = True
            self.callback = defer.Deferred()
            self.callback.addCallback(modem_init)
            self.protocol.send(b'ATZ\r')
        else:
            modem_init('OK')
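        # The deferred chain above walks the standard text-mode SMS sequence:
        #   ATZ -> AT+CMGF=1 (text mode) -> AT+CMGS="<recipient>" -> message + Ctrl-Z (0x1a)
        # with self.wait set so each modem reply advances to the next step.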

    def _write(self, txt):
        self.protocol.send(txt.encode())
def test_collect_rewards_integration(
        click_runner, testerchain, test_registry,
        stakeholder_configuration_file_location, blockchain_alice,
        blockchain_bob, random_policy_label, beneficiary,
        preallocation_escrow_agent, mock_allocation_registry, manual_worker,
        token_economics, mock_transacting_power_activation, stake_value,
        policy_value, policy_rate):

    half_stake_time = token_economics.minimum_locked_periods // 2  # Test setup
    logger = Logger("Test-CLI")  # Enter the Teacher's Logger, and
    current_period = 0  # State the initial period for incrementing

    staker_address = preallocation_escrow_agent.principal_contract.address
    worker_address = manual_worker

    # The staker is staking.
    stakes = StakeList(registry=test_registry, checksum_address=staker_address)
    stakes.refresh()
    assert stakes

    staking_agent = ContractAgency.get_agent(StakingEscrowAgent,
                                             registry=test_registry)
    assert worker_address == staking_agent.get_worker_from_staker(
        staker_address=staker_address)

    ursula_port = select_test_port()
    ursula = Ursula(is_me=True,
                    checksum_address=staker_address,
                    worker_address=worker_address,
                    registry=test_registry,
                    rest_host='127.0.0.1',
                    rest_port=ursula_port,
                    start_working_now=False,
                    network_middleware=MockRestMiddleware())

    MOCK_KNOWN_URSULAS_CACHE[ursula_port] = ursula
    assert ursula.worker_address == worker_address
    assert ursula.checksum_address == staker_address

    mock_transacting_power_activation(account=worker_address,
                                      password=INSECURE_DEVELOPMENT_PASSWORD)

    # Confirm for half the first stake duration
    for _ in range(half_stake_time):
        logger.debug(
            f">>>>>>>>>>> TEST PERIOD {current_period} <<<<<<<<<<<<<<<<")
        ursula.confirm_activity()
        testerchain.time_travel(periods=1)
        current_period += 1

    # Alice creates a policy and grants Bob access
    blockchain_alice.selection_buffer = 1

    M, N = 1, 1
    expiration = maya.now() + datetime.timedelta(days=3)
    blockchain_policy = blockchain_alice.grant(bob=blockchain_bob,
                                               label=random_policy_label,
                                               m=M,
                                               n=N,
                                               value=policy_value,
                                               expiration=expiration,
                                               handpicked_ursulas={ursula})

    # Ensure that the handpicked Ursula was selected for the policy
    arrangement = list(blockchain_policy._accepted_arrangements)[0]
    assert arrangement.ursula == ursula

    # Bob learns about the new staker and joins the policy
    blockchain_bob.start_learning_loop()
    blockchain_bob.remember_node(node=ursula)
    blockchain_bob.join_policy(random_policy_label,
                               bytes(blockchain_alice.stamp))

    # Enrico Encrypts (of course)
    enrico = Enrico(policy_encrypting_key=blockchain_policy.public_key,
                    network_middleware=MockRestMiddleware())

    verifying_key = blockchain_alice.stamp.as_umbral_pubkey()

    for index in range(half_stake_time - 5):
        logger.debug(
            f">>>>>>>>>>> TEST PERIOD {current_period} <<<<<<<<<<<<<<<<")
        ursula.confirm_activity()

        # Encrypt
        random_data = os.urandom(random.randrange(20, 100))
        ciphertext, signature = enrico.encrypt_message(message=random_data)

        # Decrypt
        cleartexts = blockchain_bob.retrieve(message_kit=ciphertext,
                                             data_source=enrico,
                                             alice_verifying_key=verifying_key,
                                             label=random_policy_label)
        assert random_data == cleartexts[0]

        # Ursula Staying online and the clock advancing
        testerchain.time_travel(periods=1)
        current_period += 1

    # Finish the passage of time
    # minus 1 because the first period was already confirmed in test_ursula_run
    for _ in range(5 - 1):
        logger.debug(
            f">>>>>>>>>>> TEST PERIOD {current_period} <<<<<<<<<<<<<<<<")
        ursula.confirm_activity()
        current_period += 1
        testerchain.time_travel(periods=1)

    #
    # WHERE'S THE MONEY, URSULA?? - Collecting Rewards
    #

    # The address the client wants Ursula to send policy rewards to
    burner_wallet = testerchain.w3.eth.account.create(
        INSECURE_DEVELOPMENT_PASSWORD)

    # The policy rewards wallet is initially empty, because it is freshly created
    assert testerchain.client.get_balance(burner_wallet.address) == 0

    # Rewards will be unlocked after the
    # final confirmed period has passed (+1).
    logger.debug(f">>>>>>>>>>> TEST PERIOD {current_period} <<<<<<<<<<<<<<<<")
    testerchain.time_travel(periods=1)
    current_period += 1
    logger.debug(f">>>>>>>>>>> TEST PERIOD {current_period} <<<<<<<<<<<<<<<<")

    # Since we are mocking the blockchain connection, manually consume the transacting power of the Beneficiary.
    mock_transacting_power_activation(account=beneficiary,
                                      password=INSECURE_DEVELOPMENT_PASSWORD)

    # Collect Policy Reward
    collection_args = ('stake', 'collect-reward', '--mock-networking',
                       '--config-file',
                       stakeholder_configuration_file_location,
                       '--policy-reward', '--no-staking-reward',
                       '--withdraw-address', burner_wallet.address,
                       '--allocation-filepath',
                       MOCK_INDIVIDUAL_ALLOCATION_FILEPATH, '--force')

    result = click_runner.invoke(nucypher_cli,
                                 collection_args,
                                 input=INSECURE_DEVELOPMENT_PASSWORD,
                                 catch_exceptions=False)
    assert result.exit_code == 0

    # Policy Reward
    collected_policy_reward = testerchain.client.get_balance(
        burner_wallet.address)
    expected_collection = policy_rate * 30
    assert collected_policy_reward == expected_collection

    #
    # Collect Staking Reward
    #
    token_agent = ContractAgency.get_agent(agent_class=NucypherTokenAgent,
                                           registry=test_registry)
    balance_before_collecting = token_agent.get_balance(address=staker_address)

    collection_args = ('stake', 'collect-reward', '--mock-networking',
                       '--config-file',
                       stakeholder_configuration_file_location,
                       '--no-policy-reward', '--staking-reward',
                       '--allocation-filepath',
                       MOCK_INDIVIDUAL_ALLOCATION_FILEPATH, '--force')

    result = click_runner.invoke(nucypher_cli,
                                 collection_args,
                                 input=INSECURE_DEVELOPMENT_PASSWORD,
                                 catch_exceptions=False)
    assert result.exit_code == 0

    # The beneficiary has withdrawn her staking rewards, which are now in the staking contract
    assert token_agent.get_balance(
        address=staker_address) >= balance_before_collecting