Esempio n. 1
0
class WorkerRep(object):
    """Helper class to represent a worker in the broker.

    Instances of this class are used to track the state of the attached worker
    and carry the timers for incoming and outgoing heartbeats.

    :param proto:    the worker protocol id.
    :type proto:     str
    :param wid:      the worker id.
    :type wid:       str
    :param service:  service this worker serves
    :type service:   str
    :param stream:   the ZMQStream used to send messages
    :type stream:    ZMQStream
    """
    def __init__(self, proto, wid, service, stream):
        self.proto = proto
        self.id = wid
        self.service = service
        # Remaining missed-heartbeat budget; refilled on each worker heartbeat.
        self.curr_liveness = HB_LIVENESS
        self.stream = stream
        self.last_hb = 0
        # Every HB_INTERVAL: decrement liveness and ping the worker.
        self.hb_out_timer = PeriodicCallback(self.send_hb, HB_INTERVAL)
        self.hb_out_timer.start()

    def send_hb(self):
        """Called on every HB_INTERVAL.

        Decrements the current liveness by one.

        Sends heartbeat (command frame b'\\x04') to worker.
        """
        self.curr_liveness -= 1
        msg = [self.id, b'', self.proto, b'\x04']
        self.stream.send_multipart(msg)

    def on_heartbeat(self):
        """Called when a heartbeat message from the worker was received.

        Sets current liveness to HB_LIVENESS.
        """
        self.curr_liveness = HB_LIVENESS

    def is_alive(self):
        """Return True when the worker is considered alive."""
        return self.curr_liveness > 0

    def shutdown(self):
        """Cleanup worker: stop the heartbeat timer and drop the stream."""
        self.hb_out_timer.stop()
        self.hb_out_timer = None
        self.stream = None
Esempio n. 2
0
class BufferModule(base.ZmqProcess):
    """ZMQ process that buffers messages arriving on a SUB stream and
    periodically republishes state through a callback."""

    def __init__(self, recv_addr, send_addr, recv_title, send_title, state_handler, period=1000):
        super().__init__()
        # Stream and timer are created in setup() once the loop exists.
        self.sub_stream = None
        self.timer = None
        self.recv_addr = recv_addr
        self.recv_title = recv_title
        self.state_handler = state_handler
        self.period = period
        self.callback = Callback(Publisher(send_addr, send_title), self.state_handler)

    def setup(self):
        """Create the SUB stream, its handler, and the periodic timer."""
        super().setup()
        topic = self.recv_title.encode('utf-8')
        self.sub_stream, _ = self.stream(zmq.SUB, self.recv_addr, bind=False, subscribe=topic)
        handler = SubStreamHandler(self.sub_stream, self.stop, self.state_handler)
        self.sub_stream.on_recv(handler)
        self.timer = PeriodicCallback(self.callback, self.period, self.loop)

    def run(self):
        """Set up, start the timer, and enter the event loop."""
        self.setup()
        print('Start loop!')
        self.timer.start()
        self.loop.start()

    def stop(self):
        """Terminate the event loop."""
        self.loop.stop()
Esempio n. 3
0
class DeviceRep(object):
    """
    Helper class to represent a device to a worker
    """

    def __init__(self, device_id, state='unknown'):
        self.id = device_id
        self.state = state
        self.curr_liveness = CLIENT_HB_LIVENESS
        # Ticks liveness down; the device is declared dead at zero.
        self.hb_timer = PeriodicCallback(self.heartbeat, CLIENT_HB_INTERVAL)
        self.hb_timer.start()

    def heartbeat(self):
        # One interval elapsed without traffic from the device.
        if self.curr_liveness > 0:
            self.curr_liveness -= 1
        if self.curr_liveness == 0:
            self.state = 'dead'

    def on_message_received(self):
        # Any message counts as proof of life: refill the budget.
        self.curr_liveness = CLIENT_HB_LIVENESS

    def is_alive(self):
        return self.curr_liveness > 0

    def get_state(self):
        return self.state

    def shutdown(self):
        # Stop and discard the timer; instance keeps its last state.
        self.hb_timer.stop()
        self.hb_timer = None
Esempio n. 4
0
class AppServer(object):
    """ROUTER server that tracks connected clients by identity and
    heartbeats them, dropping clients silent for more than 10 seconds.
    """

    #	listen = "127.0.0.1"
    listen = "*"

    port = 5555

    def __init__(self):
        self.ctx = zmq.Context()
        self.loop = IOLoop.instance()
        # identity -> datetime of last received message
        self.client_identities = {}

        self.server = self.ctx.socket(zmq.ROUTER)

        bind_addr = "tcp://%s:%s" % (self.listen, self.port)
        print("bind_addr=", bind_addr)

        self.server.bind(bind_addr)
        print("Server listening for new client connections at", bind_addr)
        self.server = ZMQStream(self.server)
        self.server.on_recv(self.on_recv)

        # Heartbeat / reaper tick, once per second.
        self.periodic = PeriodicCallback(self.periodictask, 1000)

    def start(self):
        """Start the heartbeat timer and run the IO loop until Ctrl-C."""
        self.periodic.start()
        try:
            self.loop.start()
        except KeyboardInterrupt:
            pass

    def periodictask(self):
        """Ping live clients; collect then drop those silent for >10s."""
        stale_clients = []

        for client_id, last_seen in self.client_identities.items():
            if last_seen + timedelta(seconds=10) < datetime.utcnow():
                stale_clients.append(client_id)
            else:
                msg = HelloMessage()
                msg.send(self.server, client_id)

        # Delete after iteration so the dict is not mutated while iterated.
        for client_id in stale_clients:
            print(
                # fixed typo: "cliient" -> "client"
                "Haven't received a HELO from client %s recently. Dropping from list of connected clients."
                % client_id)
            del self.client_identities[client_id]

        sys.stdout.write(".")
        sys.stdout.flush()

    def on_recv(self, msg):
        """Record the sender as alive and log the message type."""
        identity = msg[0]

        self.client_identities[identity] = datetime.utcnow()

        msg_type = msg[1]
        print("Received message of type %s from client ID %s!" %
              (msg_type, identity))
Esempio n. 5
0
def main(pat):
    """Subscribe to the IOPub channel of the kernel whose connection file
    matches *pat*, log every message, and print the time every 5 minutes."""
    fname = find_connection_file(pat)
    with open(fname) as f:
        cfg = json.load(f)

    transport = cfg.get('transport', 'tcp')
    url = "%s://%s:%s" % (transport, cfg['ip'], cfg['iopub_port'])

    session = Session(key=cfg['key'])

    ctx = zmq.Context.instance()
    sub = ctx.socket(zmq.SUB)
    sub.subscribe = b''  # subscribe to every topic
    sub.connect(url)

    stream = ZMQStream(sub)
    stream.on_recv(lambda msg_list: log_msg(session, msg_list))

    pc = PeriodicCallback(print_time, 5 * 60 * 1000)
    pc.start()
    IOLoop.instance().start()
Esempio n. 6
0
class WorkerRep(object):

    """Broker-side bookkeeping for one attached worker.

    Tracks the worker's liveness budget and owns the timer that sends
    outgoing heartbeats.

    :type wid:       str
    :param wid:      the worker id.
    :param service:  service this worker serves
    :type service:   str
    :param stream:   the ZMQStream used to send messages
    :type stream:    ZMQStream
    """

    def __init__(self, wid, service, stream):
        self.id = wid
        self.service = service
        self.multicasts = []
        self.curr_liveness = HB_LIVENESS
        self.stream = stream
        self.last_hb = 0
        self.hb_out_timer = PeriodicCallback(self.send_hb, HB_INTERVAL)
        self.hb_out_timer.start()

    def send_hb(self):
        """Heartbeat tick: spend one liveness credit and ping the worker."""
        self.curr_liveness -= 1
        self.stream.send_multipart(
            [self.id, b'', MDP_WORKER_VERSION, b'\x05'])

    def on_heartbeat(self):
        """A heartbeat arrived from the worker: refill the liveness budget."""
        self.curr_liveness = HB_LIVENESS

    def is_alive(self):
        """True while the worker still has liveness credits left."""
        return self.curr_liveness > 0

    def shutdown(self):
        """Stop the heartbeat timer and release the stream."""
        self.hb_out_timer.stop()
        self.hb_out_timer = None
        self.stream = None
Esempio n. 7
0
 def run(self):
     """Schedule periodic mail processing, then run the base service loop."""
     interval_s = self.configreader["services"][self.name]["checkmail_interval"]
     beat = PeriodicCallback(self.cmd_processmails,
                             1000 * interval_s,  # seconds -> milliseconds
                             io_loop=self.ioloop)
     beat.start()
     Service.run(self)
Esempio n. 8
0
class TaskState (object):
    """ Tracks task state (with help of watchdog) """

    log = skytools.getLogger ('d:TaskState')

    def __init__ (self, uid, name, info, ioloop, cc, xtx):
        self.uid = uid
        self.name = name
        self.info = info
        self.pidfile = info['config']['pidfile']
        self.ioloop = ioloop
        self.cc = cc
        self.xtx = xtx
        self.timer = None
        self.timer_tick = 1     # watchdog period, seconds
        self.heartbeat = False
        self.start_time = None
        self.dead_since = None

    def start (self):
        """Record start time and launch the periodic watchdog."""
        self.start_time = time.time()
        self.timer = PeriodicCallback (self.watchdog, self.timer_tick * 1000, self.ioloop)
        self.timer.start()

    def stop (self):
        """Request task shutdown by signalling its pidfile with SIGINT."""
        try:
            self.log.info ('Signalling %s', self.name)
            skytools.signal_pidfile (self.pidfile, signal.SIGINT)
        except Exception:
            # was a bare except: keep best-effort semantics but no longer
            # swallow SystemExit / KeyboardInterrupt
            self.log.exception ('signal_pidfile failed: %s', self.pidfile)

    def watchdog (self):
        """Periodic liveness probe (signal 0); stop timer once task is gone."""
        live = skytools.signal_pidfile (self.pidfile, 0)
        if live:
            self.log.debug ('%s is alive', self.name)
            if self.heartbeat:
                self.send_reply ('running')
        else:
            self.log.info ('%s is over', self.name)
            self.dead_since = time.time()
            self.timer.stop()
            self.timer = None
            self.send_reply ('stopped')

    def ccpublish (self, msg):
        """Wrap *msg* in a cmsg and send it over the CC connection."""
        assert isinstance (msg, TaskReplyMessage)
        cmsg = self.xtx.create_cmsg (msg)
        cmsg.send_to (self.cc)

    def send_reply (self, status, feedback = None):
        """Publish a task status reply.

        *feedback* defaults to an empty dict; a fresh dict is created per
        call to avoid the shared-mutable-default pitfall.
        """
        msg = TaskReplyMessage(
                req = 'task.reply.%s' % self.uid,
                handler = self.info['task']['task_handler'],
                task_id = self.info['task']['task_id'],
                status = status,
                feedback = feedback if feedback is not None else {})
        self.ccpublish (msg)
Esempio n. 9
0
class TaskState(object):
    """ Tracks task state (with help of watchdog) """

    log = skytools.getLogger('d:TaskState')

    def __init__(self, uid, name, info, ioloop, cc, xtx):
        self.uid = uid
        self.name = name
        self.info = info
        self.pidfile = info['config']['pidfile']
        self.ioloop = ioloop
        self.cc = cc
        self.xtx = xtx
        self.timer = None
        self.timer_tick = 1  # watchdog period, seconds
        self.heartbeat = False
        self.start_time = None
        self.dead_since = None

    def start(self):
        """Record start time and launch the periodic watchdog."""
        self.start_time = time.time()
        self.timer = PeriodicCallback(self.watchdog, self.timer_tick * 1000,
                                      self.ioloop)
        self.timer.start()

    def stop(self):
        """Request task shutdown by signalling its pidfile with SIGINT."""
        try:
            self.log.info('Signalling %s', self.name)
            skytools.signal_pidfile(self.pidfile, signal.SIGINT)
        except Exception:
            # narrowed from a bare except: don't swallow SystemExit etc.
            self.log.exception('signal_pidfile failed: %s', self.pidfile)

    def watchdog(self):
        """Periodic liveness probe (signal 0); stop timer once task is gone."""
        live = skytools.signal_pidfile(self.pidfile, 0)
        if live:
            self.log.debug('%s is alive', self.name)
            if self.heartbeat:
                self.send_reply('running')
        else:
            self.log.info('%s is over', self.name)
            self.dead_since = time.time()
            self.timer.stop()
            self.timer = None
            self.send_reply('stopped')

    def ccpublish(self, msg):
        """Wrap *msg* in a cmsg and send it over the CC connection."""
        assert isinstance(msg, TaskReplyMessage)
        cmsg = self.xtx.create_cmsg(msg)
        cmsg.send_to(self.cc)

    def send_reply(self, status, feedback=None):
        """Publish a task status reply.

        *feedback* defaults to an empty dict, created fresh per call to
        avoid the shared-mutable-default pitfall of ``feedback={}``.
        """
        msg = TaskReplyMessage(req='task.reply.%s' % self.uid,
                               handler=self.info['task']['task_handler'],
                               task_id=self.info['task']['task_id'],
                               status=status,
                               feedback=feedback if feedback is not None else {})
        self.ccpublish(msg)
Esempio n. 10
0
class AppClient(object):
    """DEALER client that heartbeats the server once per second and tracks
    when it last heard back."""

    # where to connect to - point to another IP or hostname:5556 if running server on another machine
    endpoint = "tcp://127.0.0.1:5556"

    # crypto = True means 'use CurveZMQ'. False means don't. Must match server.
    crypto = True

    def __init__(self):
        self.ctx = zmq.Context()
        self.loop = IOLoop.instance()

        self.client = self.ctx.socket(zmq.DEALER)

        # Optionally wrap the socket with CurveZMQ credentials.
        if self.crypto:
            self.keymonkey = KeyMonkey("client")
            self.client = self.keymonkey.setupClient(self.client,
                                                     self.endpoint, "server")

        self.client.connect(self.endpoint)
        print("Connecting to", self.endpoint)
        self.client = ZMQStream(self.client)
        self.client.on_recv(self.on_recv)

        # One-second heartbeat timer.
        self.periodic = PeriodicCallback(self.periodictask, 1000)
        self.last_recv = None

    def periodictask(self):
        """Warn when the server has been silent for 5s, then send HELLO."""
        silent_too_long = (not self.last_recv
                           or self.last_recv + timedelta(seconds=5) < datetime.utcnow())
        if silent_too_long:
            print(
                "Hmmm... haven't heard from the server in 5 seconds... Server unresponsive."
            )

        print("Sending HELLO to server")
        msg = HelloMessage()
        msg.send(self.client)

    def start(self):
        """Start the heartbeat and run the IO loop until Ctrl-C."""
        self.periodic.start()
        try:
            self.loop.start()
        except KeyboardInterrupt:
            pass

    def on_recv(self, msg):
        """Record receive time and log the message type."""
        self.last_recv = datetime.utcnow()
        print("Received a message of type %s from server!" % msg[0])
Esempio n. 11
0
 def start(self):
     """Wire up beacon handling, the control stream and the periodic
     ping/reap timers, then block in the event loop."""
     loop = self.loop
     loop.add_handler(self.udp.handle.fileno(), self.handle_beacon, loop.READ)
     control_stream = ZMQStream(self.pipe, loop)
     control_stream.on_recv(self.control_message)
     interval_ms = PING_INTERVAL * 1000
     ping_timer = PeriodicCallback(self.send_ping, interval_ms, loop)
     ping_timer.start()
     reap_timer = PeriodicCallback(self.reap_peers, interval_ms, loop)
     reap_timer.start()
     loop.start()
Esempio n. 12
0
 def start(self):
     """Hook up the UDP beacon, the control-pipe stream, and the periodic
     ping/reap tasks, then run the event loop."""
     loop = self.loop
     loop.add_handler(self.udp.handle.fileno(), self.handle_beacon, loop.READ)
     pipe_stream = ZMQStream(self.pipe, loop)
     pipe_stream.on_recv(self.control_message)
     for task in (self.send_ping, self.reap_peers):
         timer = PeriodicCallback(task, PING_INTERVAL * 1000, loop)
         timer.start()
     loop.start()
Esempio n. 13
0
class AppClient(object):
    """DEALER client with send/recv timeouts; heartbeats the server once
    per second and always tears down socket and context on exit."""

    def __init__(self):
        self.ctx = zmq.Context()
        self.loop = IOLoop.instance()
        self.endpoint = "tcp://127.0.0.1:5556"
        self.client = self.ctx.socket(zmq.DEALER)
        # Without linger and timeouts you might have problems when closing context
        self.client.setsockopt(zmq.LINGER, 0)
        self.client.setsockopt(zmq.RCVTIMEO, 5000)  # 5s
        self.client.setsockopt(zmq.SNDTIMEO, 5000)
        print("Connecting to", self.endpoint)
        self.client.connect(self.endpoint)
        self.client = ZMQStream(self.client)
        self.client.on_recv(self.on_recv)
        self.periodic = PeriodicCallback(self.periodictask, 1000)
        self.last_recv = None

    def disconnect(self):
        """Stop the timer and close socket + context (best effort)."""
        if self.ctx is not None:
            try:
                self.periodic.stop()
                print("Closing socket and context")
                self.client.close()
                self.ctx.term()
            except Exception as e:
                print(e)

    def periodictask(self):
        """Once a second: complain if the server is silent, send HELLO."""
        if self.client is None:
            return
        stale = (not self.last_recv
                 or self.last_recv + timedelta(seconds=5) < datetime.utcnow())
        if stale:
            print("No data from remote (5s)... [ping]")
        print("Sending HELLO to server")
        HelloMessage().send(self.client)

    def start(self):
        """Run until Ctrl-C or error; always disconnect afterwards."""
        try:
            self.periodic.start()
            self.loop.start()
            # only reached after the loop stops
            msg = HelloMessage()
            msg.send(self.client)
        except KeyboardInterrupt:
            print("\n\nCtrl+C detected\n")
        except Exception as E:
            print("Error detected")
            print(str(E))
        finally:
            self.disconnect()

    def on_recv(self, msg):
        """Record arrival time and log the message type."""
        self.last_recv = datetime.utcnow()
        print("Received a message of type %s from server!" % msg[0])
Esempio n. 14
0
class AppServer(object):
    """ROUTER server with socket timeouts; heartbeats known clients and
    drops any that have been silent for more than 10 seconds."""

    def __init__(self):
        self.listen = "127.0.0.1"
        self.port = 5556
        self.ctx = zmq.Context()
        self.loop = IOLoop.instance()
        # identity -> datetime of the last message received from it
        self.client_identities = {}
        self.server = self.ctx.socket(zmq.ROUTER)
        # Without linger and timeouts you might have problems when closing context
        self.server.setsockopt(zmq.LINGER, 0)
        self.server.setsockopt(zmq.RCVTIMEO, 5000)  # 5s
        self.server.setsockopt(zmq.SNDTIMEO, 5000)
        bind_addr = "tcp://%s:%s" % (self.listen, self.port)
        self.server.bind(bind_addr)
        print("Server listening for new client connections at", bind_addr)
        self.server = ZMQStream(self.server)
        self.server.on_recv(self.on_recv)
        self.periodic = PeriodicCallback(self.periodictask, 1000)

    def start(self):
        """Run the IO loop until Ctrl-C, then tear everything down."""
        self.periodic.start()
        try:
            self.loop.start()
        except KeyboardInterrupt:
            self.periodic.stop()
            print("\nClosing socket and context\n")
            self.server.close()
            self.ctx.term()

    def periodictask(self):
        """Heartbeat fresh clients; collect then drop the stale ones."""
        stale = []
        for client_id, last_seen in self.client_identities.items():
            if last_seen + timedelta(seconds=10) < datetime.utcnow():
                stale.append(client_id)
            else:
                HelloMessage().send(self.server, client_id)
        for client_id in stale:
            print(
                "\nHaven't received a HELO from client %s recently. Dropping from list of connected clients."
                % client_id)
            del self.client_identities[client_id]
        sys.stdout.write(".")
        sys.stdout.flush()

    def on_recv(self, msg):
        """Mark the sending identity as fresh and log the message type."""
        # msg[0] carries the client id (socket handle); used to reply back
        identity = msg[0]
        self.client_identities[identity] = datetime.utcnow()
        msg_type = msg[1]
        print("Received message of type %s from client ID %s!" %
              (msg_type, identity))
Esempio n. 15
0
    def loop():
      # Worker-side IO loop: pulls locally queued messages from a PULL
      # socket and forwards them to worker DEALER connections, routing
      # responses back to registered callbacks and retrying on timeout.
      self.__ioloop = IOLoop()
      queue_socket = self.__context.socket(zmq.PULL)
      queue_socket.connect(self.message_address)
      queue_stream = ZMQStream(queue_socket, self.__ioloop)
      worker_socket = self.__context.socket(zmq.DEALER)
      for address in self.active_connections:
        worker_socket.connect(address)
      worker_stream = ZMQStream(worker_socket, self.__ioloop)

      def receive_response(message, response_override=None):
        # message[1] is the message id, message[2] the compressed payload.
        # Drop all bookkeeping for the id, then invoke its callback (if any)
        # with either the override (e.g. a synthesized timeout error) or the
        # decoded payload.
        self.__queued_messages.pop(message[1], None)
        self.__message_timeouts.pop(message[1], None)
        callback = self.__callbacks.pop(message[1], None)
        if callback:
          try:
            callback(response_override or self.loads(self.decompress(message[2])))
          except Exception as e:
            self.log_error(e)
            callback({'error': e})
      worker_stream.on_recv(receive_response)

      def queue_message(message):
        # A truthy first frame is a control command that adds/removes worker
        # connections; anything else is tracked with a timestamp and
        # forwarded to the workers.
        if message[0]:
          if message[0] == WORKER_SOCKET_CONNECT and message[2] not in self.active_connections:
            self.active_connections.add(message[2])
            worker_stream.socket.connect(message[2])
          elif message[0] == WORKER_SOCKET_DISCONNECT and message[2] in self.active_connections:
            self.active_connections.remove(message[2])
            worker_stream.socket.disconnect(message[2])
          return
        self.__queued_messages[message[1]] = (time(), message)
        try:
          worker_stream.send_multipart(message)
        except Exception as e:
          self.log_error(e)
      queue_stream.on_recv(queue_message)

      def timeout_message():
        # Scan queued messages whose per-message timeout (or the default)
        # has expired; either requeue them or synthesize a timeout error.
        # NOTE(review): dict.itervalues() makes this Python 2 only --
        # confirm intended runtime before porting.
        now = time()
        for message, retry in [(item[1], self.__message_auto_retry.get(item[1][1], self.__auto_retry)) for item, t in ((i, self.__message_timeouts.get(i[1][1], self.__timeout)) for i in self.__queued_messages.itervalues()) if t >= 0 and (item[0] + t < now)]:
          if retry:
            logging.info('Worker timeout, requeuing ' + message[1])
            queue_message(message)
          else:
            receive_response(('', message[1]), {'error': 'timeout'})
      timeout_callback = PeriodicCallback(timeout_message, int(abs(self.__timeout * 1000.0)), io_loop = self.__ioloop)
      timeout_callback.start()

      self.__ioloop.start()
      self.__thread = None
Esempio n. 16
0
class DeviceApp(object):
    """DEALER device client that heartbeats the server once per second."""

    #	endpoint = "tcp://127.0.0.1:5556"
    #	endpoint = "tcp://localhost:5556"
    endpoint = "tcp://165.227.24.226:5556"

    def __init__(self):
        self.ctx = zmq.Context()
        self.loop = IOLoop.instance()

        self.client = self.ctx.socket(zmq.DEALER)
        self.client.connect(self.endpoint)
        print("Connecting to", self.endpoint)
        self.client = ZMQStream(self.client)
        self.client.on_recv(self.on_recv)

        # One-second heartbeat timer.
        self.periodic = PeriodicCallback(self.periodictask, 1000)
        self.last_recv = None

    def periodictask(self):
        """Warn if the server has been silent for 5s, then send HELLO."""
        heard_recently = (self.last_recv is not None and
                          (self.last_recv + timedelta(seconds=5)) >= datetime.utcnow())
        if not heard_recently:
            print(
                "Hmmm... haven't heard from the server in 5 seconds... Server unresponsive."
            )

        print("Sending HELLO to server")
        msg = HelloMessage()
        print("msg=", msg)
        msg.send(self.client)

    def start(self):
        """Start heartbeating and run the IO loop until Ctrl-C."""
        self.periodic.start()
        try:
            self.loop.start()
        except KeyboardInterrupt:
            pass

    def on_recv(self, msg):
        """Record receive time and log the message type."""
        self.last_recv = datetime.utcnow()
        print("Received a message of type %s from server!" % msg[0])
Esempio n. 17
0
 def on_run(self, ctx, io_loop, socks, streams):
     """Initialise proxy state, then start the heartbeat-monitor (1s) and
     grim-reaper (5s) periodic timers on *io_loop*."""
     z = ZmqRpcProxy(self._uris['manager_rpc'])
     self._data = self._initial_data()
     self._remote_handlers = set(z.available_handlers())
     env = OrderedDict([
         ('ctx', ctx),
         ('io_loop', io_loop),
         ('socks', socks),
         ('streams', streams),
     ])
     monitor = functools.partial(self.timer__monitor_heartbeats, env)
     PeriodicCallback(monitor, 1000, io_loop=io_loop).start()
     reaper = functools.partial(self.timer__grim_reaper, env)
     PeriodicCallback(reaper, 5000, io_loop=io_loop).start()
Esempio n. 18
0
class Delay (CCHandler):
    """ Delays all received messages, then dispatches them to another handler. """

    CC_ROLES = ['local', 'remote']

    log = skytools.getLogger ('h:Delay')

    tick = 250 # ms

    def __init__ (self, hname, hcf, ccscript):
        super(Delay, self).__init__(hname, hcf, ccscript)

        # target handler and per-message delay (seconds)
        self.fwd_hname = self.cf.get ('forward-to')
        self.delay = self.cf.getint ('delay', 0)
        self.fwd_handler = ccscript.get_handler (self.fwd_hname)

        # FIFO of (due_time, cmsg) pairs
        self.queue = collections.deque()

        self.timer = PeriodicCallback (self.process_queue, self.tick, self.ioloop)
        self.timer.start()

    def handle_msg (self, cmsg):
        """ Got message from client -- queue it until its due time """
        due = time.time() + self.delay
        self.queue.append ((due, cmsg))

    def process_queue (self):
        """ Forward every queued message whose delay has elapsed """
        now = time.time()
        while self.queue and self.queue[0][0] <= now:
            _, cmsg = self.queue.popleft()
            self._deliver (cmsg)

    def _deliver (self, cmsg):
        """ Hand one message to the forward handler, recording stats """
        size = cmsg.get_size()
        try:
            self.fwd_handler.handle_msg (cmsg)
            stat = 'ok'
        except Exception:
            self.log.exception ('crashed, dropping msg: %s', cmsg.get_dest())
            stat = 'crashed'
        self.stat_inc ('delay.count')
        self.stat_inc ('delay.bytes', size)
        self.stat_inc ('delay.count.%s' % stat)
        self.stat_inc ('delay.bytes.%s' % stat, size)
    def start(self):
        """Initialize and start the event loop. Listen for ZMQ control
        messages."""
        ctx = zmq.Context()
        control_sock = ctx.socket(zmq.PAIR)
        control_sock.bind("tcp://*:{}".format(settings.ZMQ_CONTROL_PORT))

        logserver = LogServer()
        logserver.listen(settings.TCP_LOGGING_PORT)

        loop = self.loop

        control_stream = ZMQStream(control_sock, loop)
        control_stream.on_recv(self.handle_ctrl_msg)

        # TODO: Stop loop and restart on CONFIG reread just to get possible new
        # EVENT_POLL_INTERVAL setting?
        chime_timer = PeriodicCallback(self.chime, self.EVENT_POLL_INTERVAL * 1E3, loop)
        chime_timer.start()

        loop.start()
Esempio n. 20
0
def run_sock_configs(sock_configs, on_run=None, control_pipe=None):
    """Build the run context for *sock_configs* and spin the IO loop.

    *on_run*, if given, is scheduled once the loop is running and receives
    (ctx, io_loop, socks, streams). *control_pipe*, if given, is polled
    twice a second and stops the loop as soon as it has data.
    """
    ctx, io_loop, socks, streams = get_run_context(sock_configs)

    if on_run:
        io_loop.add_callback(
            lambda: on_run(ctx, io_loop, socks, streams))

    if control_pipe:
        def watchdog():
            # Any message on the pipe is a shutdown request.
            if control_pipe.poll():
                io_loop.stop()
        callback = PeriodicCallback(watchdog, 500, io_loop=io_loop)
        callback.start()

    try:
        io_loop.start()
    except KeyboardInterrupt:
        pass
Esempio n. 21
0
class _HeartbeatHelper:
    def __init__(self, messaging):
        self.messaging = messaging
        self.last_message_time = time.time()
        self._heart_beat = PeriodicCallback(self._send_state, 1000)
        name = self.messaging._service_name + '.messaging.heartbeat'
        self.logger = logging.getLogger(name)

    def start(self):
        self.logger.info(' Start Heart Beat')
        self._heart_beat.start()

    def message_recieved(self):
        self.last_message_time = time.time()

    def _send_state(self):
        time_now = time.time()
        delta_time = time_now - self.last_message_time
        if delta_time > 1.5:
            msg = create_vex_message('', self.messaging._service_name, self.messaging.uuid)
            self.messaging.add_callback(self.messaging.subscription_socket.send_multipart, msg)
            self.last_message_time = time_now
Esempio n. 22
0
def start(root):
    """Boot the mongrel2 supervisor: wire the control port, (re)start the
    server and run the periodic checkup loop.

    :param root: filesystem root, stored in the module-global ``path``.
    """
    # Module-level state shared with the rest of this script.
    global loop, path, checkup_periodic, control_port
    path = root

    print('Starting.')

    # add HOSTS to /etc/hosts
    add_hosts(HOSTS)

    # define context
    ctx = zmq.Context()

    # create ioloop
    loop = zmq.eventloop.ioloop.IOLoop()

    # connect req to mongrel2 control port
    c = ctx.socket(zmq.REQ)
    c.connect(M2_CONTROL_PORT)
    control_port = ZMQStream(c, io_loop=loop)

    # define 'checkup' interval (started below, after mongrel2 is up)
    checkup_periodic = PeriodicCallback(send_checkup, 
                                        CHECKUP_INTERVAL, 
                                        io_loop=loop)

    # load mongrel2 config
    load_mongrel()

    # kill PID if server didn't get shut down at close of last run
    kill_mongrel_with_pid(M2_PID_PATH)

    # start mongrel2 with m2sh
    start_mongrel()

    # start the loop
    checkup_periodic.start()
    loop.start()
Esempio n. 23
0
class Receive:
    """ROUTER-based receiver that prints incoming frames until a client
    sends b'exit', with an async timer firing every 4 seconds."""

    def __init__(self):
        self.stop = False
        self.conx = Context.instance()
        self.socket = self.conx.socket(zmq.ROUTER)
        self.socket.bind('tcp://*:5555')
        self.periodic = PeriodicCallback(self.timer, 4000)
        self.periodic.start()

    def prt(self, message):
        """Print an [identity, payload] pair; b'exit' requests shutdown."""
        print('id=', message[0])
        print('data=', message[1])
        # idiomatic comparison (was `if (message[1] == b'exit'):`)
        if message[1] == b'exit':
            self.stop = True

    async def recv(self):
        """Receive multipart messages until stop is requested, then close."""
        # idiomatic truthiness test (was `while (self.stop == False):`)
        while not self.stop:
            msg = await self.socket.recv_multipart()
            self.prt(msg)
        self.socket.close()

    async def timer(self):
        print('OMG asynchronicity!')
Esempio n. 24
0
class BufferModule(base.ZmqProcess):
    """ZMQ process buffering messages from a SUB stream and periodically
    republishing state via a callback."""

    def __init__(self,
                 recv_addr,
                 send_addr,
                 recv_title,
                 send_title,
                 state_handler,
                 period=1000):
        super().__init__()
        # Stream and timer are created in setup() once the loop exists.
        self.sub_stream = None
        self.timer = None
        self.recv_addr = recv_addr
        self.recv_title = recv_title
        self.state_handler = state_handler
        self.period = period
        self.callback = Callback(Publisher(send_addr, send_title),
                                 self.state_handler)

    def setup(self):
        """Create the SUB stream, hook its handler, start nothing yet."""
        super().setup()
        topic = self.recv_title.encode('utf-8')
        self.sub_stream, _ = self.stream(
            zmq.SUB,
            self.recv_addr,
            bind=False,
            subscribe=topic)
        handler = SubStreamHandler(self.sub_stream, self.stop, self.state_handler)
        self.sub_stream.on_recv(handler)
        self.timer = PeriodicCallback(self.callback, self.period, self.loop)

    def run(self):
        """Set up, start the timer, and enter the event loop."""
        self.setup()
        print('Start loop!')
        self.timer.start()
        self.loop.start()

    def stop(self):
        """Terminate the event loop."""
        self.loop.stop()
Esempio n. 25
0
    def loop():
      # Worker-side IO loop: pulls locally queued messages from a PULL
      # socket, forwards them to the worker DEALER socket, and retries
      # messages whose response does not arrive in time.
      self.__ioloop = IOLoop()
      queue_socket = self.__context.socket(zmq.PULL)
      queue_socket.connect(self.message_address)
      queue_stream = ZMQStream(queue_socket, self.__ioloop)
      worker_socket = self.__context.socket(zmq.DEALER)
      worker_socket.connect(self.address)
      worker_stream = ZMQStream(worker_socket, self.__ioloop)

      def receive_response(message):
        # message[1] is the message id, message[2] the compressed payload.
        # Drop all bookkeeping for the id and invoke its callback, if any.
        self.__queued_messages.pop(message[1], None)
        self.__message_timeouts.pop(message[1], None)
        callback = self.__callbacks.pop(message[1], None)
        if callback:
          try:
            callback(self.loads(self.decompress(message[2])))
          except Exception as e:
            self.log_error(e)
      worker_stream.on_recv(receive_response)

      def queue_message(message):
        # Track send time (ms) so requeue_message can detect timeouts,
        # then forward to the worker.
        self.__queued_messages[message[1]] = (time() * 1000, message)
        try:
          worker_stream.send_multipart(message)
        except Exception as e:
          self.log_error(e)
      queue_stream.on_recv(queue_message)

      def requeue_message():
        # Resend every message older than its per-message timeout
        # (default: self.__retry_ms).
        # NOTE(review): dict.itervalues() makes this Python 2 only --
        # confirm intended runtime before porting.
        now = time() * 1000
        for message in (item[1] for item in self.__queued_messages.itervalues() if item[0] + self.__message_timeouts.get(item[1][1], self.__retry_ms) < now):
          queue_message(message)
      requeue_callback = PeriodicCallback(requeue_message, self.__retry_ms, io_loop = self.__ioloop)
      requeue_callback.start()

      self.__ioloop.start()
      self.__thread = None
Esempio n. 26
0
class DemoApp(ZMQProcess):
    """Demo process that PUBs a ping once per second and SUBs to itself."""

    def __init__ (self):
        super(DemoApp, self).__init__()

    def setup(self):
        """Create the PUB/SUB streams and the heartbeat timer."""
        super(DemoApp, self).setup()

        self.pub, self.pub_addr = self.stream(zmq.PUB, 'tcp://127.0.0.1:%(port)s', True)

        self.sub, sub_addr = self.stream(zmq.SUB, self.pub_addr, False,
                callback=DemoHandler())
        self.heartbeat = PeriodicCallback(self.ping, 1000, self.loop)

    def ping(self):
        # print as a function call: valid on both Python 2 and 3 for a
        # single argument (the original `print 'SEND PING'` statement is a
        # syntax error on Python 3, which the rest of this file targets).
        print('SEND PING')
        self.pub.send_multipart(['ping', json.dumps(['ping', time.time()])])

    def local_run(self):
        print('START HEARTBEAT')
        self.heartbeat.start()

    def stop(self):
        """Stop the heartbeat timer and the event loop."""
        self.heartbeat.stop()
        self.loop.stop()
Esempio n. 27
0
 def run(self):
     """Launch consumer and producer as child processes, supervise them
     with a 500ms watchdog, and terminate both when the loop exits."""
     # Consumer bridges the backend REP socket with the push/pull pair.
     consumer = Process(target=Consumer(self.uris['backend_rep'],
                                     self.uris['consumer_push_be'],
                                     self.uris['consumer_pull_be']).run
     )
     # Producer connects the frontend REP/PUB URIs to the same pair.
     producer = Process(target=self.producer_class(
             self.uris['frontend_rep_uri'],
             self.uris['frontend_pub_uri'],
             self.uris['consumer_pull_be'],
             self.uris['consumer_push_be']).run
     )
     self.io_loop = IOLoop()
     periodic_callback = PeriodicCallback(self.watchdog, 500, self.io_loop)
     periodic_callback.start()
     try:
         consumer.start()
         producer.start()
         self.io_loop.start()
     except KeyboardInterrupt:
         pass
     # Always tear down the children, including after Ctrl-C.
     producer.terminate()
     consumer.terminate()
     logging.getLogger(log_label(self)).info('PRODUCER and CONSUMER have '
                                             'been terminated')
Esempio n. 28
0
    def run(self):
        """Issue requests through the configured sockets.

        With a positive ``request_delay`` the requests are driven by a
        periodic timer (started after a 0.1s grace period); otherwise
        ``target_count`` requests are scheduled immediately.
        """
        self.received_count = 0
        ctx, io_loop, self.socks, streams = get_run_context(self.sock_configs)
        self.io_loop = io_loop

        if self.request_delay > 0:
            # request_delay is in seconds; PeriodicCallback expects ms.
            periodic_callback = PeriodicCallback(self.do_request,
                    self.request_delay * 1000., io_loop)
            io_loop.add_timeout(timedelta(seconds=0.1), lambda: periodic_callback.start())
        else:
            for i in range(self.target_count):
                io_loop.add_callback(self.do_request)

        try:
            io_loop.start()
        except KeyboardInterrupt:
            pass
Esempio n. 29
0
class Broker(object):
    """This is implementation of broker

    You don't need to override any methods in this class. It works immediately.
    Just call start_listening() method

    :type context:    Context
    :param context:   instance of zmq.Context
    :param endpoint:  listening address
    :type endpoint:   str
    """

    def __init__(self, context, endpoint):
        socket = context.socket(zmq.ROUTER)
        socket.bind(endpoint)
        self.stream = ZMQStream(socket)
        self.stream.on_recv(self.on_message)

        # services, workers and multicast groups
        self._workers = {}
        self._services = {}
        self._multicasts = {}
        # periodically reap workers whose heartbeats have stopped
        self.hb_check_timer = PeriodicCallback(self.on_timer, HB_INTERVAL)
        self.hb_check_timer.start()
        return

    def start_listening(self):
        """Start listening to new messages
        """
        IOLoop.instance().start()

    def stop_listening(self):
        """Stop listening
        """
        IOLoop.instance().stop()

    def on_message(self, msg):
        """Processes given message.

        Decides what kind of message it is -- client or worker -- and
        calls the appropriate method. If unknown, the message is
        ignored.

        :param msg: message parts
        :type msg:  list of str

        :rtype: None
        """
        return_addresses, msg = self.split_address(msg)
        # dispatch on first frame after path
        method_to_call = None
        try:
            t = msg.pop(0)
            if t.startswith(b'MDPW'):
                method_to_call = self.on_worker
            elif t.startswith(b'MDPC'):
                method_to_call = self.on_client
            else:
                # Unknown protocol
                pass
        except (AttributeError, IndexError):
            # Wrong incoming msg format
            pass
        if method_to_call is not None:
            method_to_call(return_addresses, msg)
        return

    def on_client(self, return_addresses, message):
        """Method called on client message.

        Frame 0 of msg is the command id
        Frame 1 of msg is the requested service.
        The remaining frames are the request to forward to the worker.

        .. note::

           If the service is unknown to the broker the message is
           ignored.

        If a worker is available for the requested service, the
        message is repackaged and sent to the worker.

        If the service name starts with `mmi.`, the message is passed to
        the internal MMI_ handler.

        .. _MMI: http://rfc.zeromq.org/spec:8

        If the service name starts with `multicast.`, the message is sent
        to all worker in that group.

        :param return_addresses:    return address stack
        :type return_addresses:     list of str
        :param message:   message parts
        :type message:    list of str

        :rtype: None
        """

        cmd = message.pop(0)  # always 0x01
        service = message.pop(0)

        # mmi requests
        if service.startswith(b'mmi.'):
            self.on_client_mmi(return_addresses, service, message)
            return

        # multicast requests
        if service.startswith(b'multicast.'):
            self.on_client_multicast(return_addresses, service, message)
            return

        # worker requests
        try:
            available_workers = self._services[service]
            random_worker = choice(available_workers)  # TODO: loadbalancing
            to_send = [random_worker.id, b'', MDP_WORKER_VERSION, b'\x02']
            to_send.extend(return_addresses)
            to_send.append(b'')
            to_send.extend(message)
            self.stream.send_multipart(to_send)

        except KeyError:
            # unknown service
            self.client_response(return_addresses, b'broker', b'No worker available', error=True)
        return

    def on_client_multicast(self, return_addresses, service, message):
        """Handling multicast messages from client

        :param return_addresses:    return address stack
        :type return_addresses:     list of str
        :param service:   name of mmi service
        :type service:    str
        :param message:   message parts
        :type message:    list of str
        """
        target = service[10:]  # remove 'multicast.'
        try:
            # first, prepare list of workers in target multicast
            grouped_by_names = {}
            for worker in self._multicasts[target]:
                if worker.service in grouped_by_names:
                    grouped_by_names[worker.service].append(worker)
                else:
                    grouped_by_names[worker.service] = [worker]

            # send message to one worker per service
            sent_messages = []
            for name, workers in grouped_by_names.items():
                random_worker = choice(workers)  # TODO: loadbalancing
                to_send = [random_worker.id, b'', MDP_WORKER_VERSION, b'\x02']
                to_send.extend(return_addresses)
                to_send.append(b'')
                to_send.extend(message)
                self.stream.send_multipart(to_send)
                sent_messages.append(random_worker.service)

            # notify client with list of services in multicast group
            client_msg = return_addresses[:]
            client_msg.extend([b'', MDP_WORKER_VERSION, b'\x05'])
            client_msg.extend(sent_messages)
            self.stream.send_multipart(client_msg)
        except KeyError:
            # unknown service
            self.client_response(return_addresses, b'broker', b'No services available in this multicast', error=True)
        return

    def on_client_mmi(self, return_addresses, service, message):
        """Handling MMI messages from client

        :param return_addresses:    return address stack
        :type return_addresses:     list of str
        :param service:   name of mmi service
        :type service:    str
        :param message:   message parts
        :type message:    list of str
        """
        if service == b'mmi.service':
            return self.on_client_mmi_service(return_addresses, service, message)
        elif service == b'mmi.services':
            return self.on_client_mmi_services(return_addresses, service, message)
        elif service == b'mmi.workers':
            return self.on_client_mmi_workers(return_addresses, service, message)
        elif service == b'mmi.multicasts':
            return self.on_client_mmi_multicasts(return_addresses, service, message)
        else:
            # unknown mmi service - notify client
            self.client_response(return_addresses, b'broker', b'Service not found', error=True)

    def on_client_mmi_service(self, return_addresses, service, message):
        """Check if services exists
        """
        return self.client_response_pack(return_addresses, b'broker', message[0] in self._services.keys())

    def on_client_mmi_services(self, return_addresses, service, message):
        """List of all services
        """
        return self.client_response_pack(return_addresses, b'broker', [k for k in self._services])

    def on_client_mmi_workers(self, return_addresses, service, message):
        """Number of workers per service
        """
        s = {}
        for se in self._services:
            s[se] = len(self._services[se])
        return self.client_response_pack(return_addresses, b'broker', s)

    def on_client_mmi_multicasts(self, return_addresses, service, message):
        """List of available multicast groups
        """
        m = {}
        for se in self._multicasts:
            m[se] = [s.service for s in self._multicasts[se]]
        return self.client_response_pack(return_addresses, b'broker', m)

    def on_worker(self, return_addresses, message):
        """Method called on worker message.

        Frame 0 of msg is the command id.
        The remaining frames depend on the command.

        This method determines the command sent by the worker and
        calls the appropriate method. If the command is unknown the
        message is ignored and a DISCONNECT is sent.

        :param return_addresses:  return address stack
        :type return_addresses:   list of str
        :param message: message parts
        :type message:  list of str

        :rtype: None
        """
        cmd = message.pop(0)

        worker_cmds = {
            b'\x01': self.on_worker_ready,
            b'\x03': self.on_worker_partial_reply,
            b'\x04': self.on_worker_final_reply,
            b'\x05': self.on_worker_heartbeat,
            b'\x06': self.on_worker_disconnect,
            b'\x07': self.on_worker_multicast_add,  # this is not part of the Majordomo Protocol 0.2 !
            b'\x08': self.on_worker_exception,  # this is not part of the Majordomo Protocol 0.2 !
            b'\x09': self.on_worker_error,  # this is not part of the Majordomo Protocol 0.2 !
        }
        if cmd in worker_cmds:
            fnc = worker_cmds[cmd]
            fnc(return_addresses, message)
        else:
            # ignore unknown command
            pass
        return

    def on_worker_ready(self, return_addresses, message):
        """Called when new worker is ready to receive messages.

        Register worker to list of available services.

        :param return_addresses:  return address stack
        :type return_addresses:   list of str
        :param message: message parts
        :type message:  list of str

        :rtype: None
        """
        # Frame 0 of msg is a service.
        service = message.pop(0)
        wid = return_addresses[0]
        return self.register_worker(wid, service)

    def on_worker_partial_reply(self, return_addresses, message):
        """Process worker PARTIAL REPLY command.

        Route the `message` to the client given by the address(es) in front of `message`.

        :param return_addresses:  return address stack
        :type return_addresses:   list of str
        :param message: message parts
        :type message:  list of str

        :rtype: None
        """
        ret_id = return_addresses[0]
        try:
            wrep = self._workers[ret_id]
        except KeyError:
            return  # worker is gone, ignore this message

        return_addresses, msg = self.split_address(message)

        self.client_response(return_addresses, wrep.service, msg, partial=True)
        return

    def on_worker_final_reply(self, return_addresses, message):
        """Process worker FINAL REPLY command.

        Route the `message` to the client given by the address(es) in front of `message`.

        :param return_addresses:  return address stack
        :type return_addresses:   list of str
        :param message: message parts
        :type message:  list of str

        :rtype: None
        """
        ret_id = return_addresses[0]
        try:
            wrep = self._workers[ret_id]
        except KeyError:
            return  # worker is gone, ignore this message

        return_addresses, msg = self.split_address(message)

        self.client_response(return_addresses, wrep.service, msg)
        return

    def on_worker_heartbeat(self, return_addresses, message):
        """Process worker HEARTBEAT command.

        :param return_addresses:  return address stack
        :type return_addresses:   list of str
        :param message: message parts
        :type message:  list of str

        :rtype: None
        """
        ret_id = return_addresses[0]
        try:
            worker = self._workers[ret_id]
            if worker.is_alive():
                worker.on_heartbeat()
        except KeyError:
            # ignore HB for unknown worker
            pass
        return

    def on_worker_disconnect(self, return_addresses, message):
        """Process worker DISCONNECT command.

        Remove worker from list of services.

        :param return_addresses:  return address stack
        :type return_addresses:   list of str
        :param message: message parts
        :type message:  list of str

        :rtype: None
        """
        wid = return_addresses[0]
        return self.unregister_worker(wid)

    def on_worker_multicast_add(self, return_addresses, message):
        """Process worker MULTICAST ADD command.

        Add worker to list of multicasts
        This is not part of the Majordomo Protocol 0.2 !

        :param return_addresses:  return address stack
        :type return_addresses:   list of str
        :param message: message parts
        :type message:  list of str

        :rtype: None
        """
        multicast_name = message.pop(0)
        wid = return_addresses[0]
        return self.register_multicast(wid, multicast_name)

    def on_worker_exception(self, return_addresses, message):
        """Process worker EXCEPTION command.

        Route the `message` to the client given by the address(es) in front of `message`.
        This is not part of the Majordomo Protocol 0.2 !

        :param return_addresses:  return address stack
        :type return_addresses:   list of str
        :param message: message parts
        :type message:  list of str

        :rtype: None
        """
        ret_id = return_addresses[0]
        try:
            wrep = self._workers[ret_id]
        except KeyError:
            return  # worker is gone, ignore this message

        return_addresses, msg = self.split_address(message)

        self.client_response(return_addresses, wrep.service, msg, exception=True)
        return

    def on_worker_error(self, return_addresses, message):
        """Process worker ERROR command.

        Route the `message` to the client given by the address(es) in front of `message`.
        This is not part of the Majordomo Protocol 0.2 !

        :param return_addresses:  return address stack
        :type return_addresses:   list of str
        :param message: message parts
        :type message:  list of str

        :rtype: None
        """
        ret_id = return_addresses[0]
        try:
            wrep = self._workers[ret_id]
        except KeyError:
            return  # worker is gone, ignore this message

        return_addresses, msg = self.split_address(message)

        self.client_response(return_addresses, wrep.service, msg, error=True)
        return

    def on_timer(self):
        """Method called on timer expiry.

        Checks which workers are dead and unregisters them.

        :rtype: None
        """
        # Iterate over a snapshot: unregister_worker() deletes entries from
        # self._workers, and mutating a dict while iterating over its view
        # raises RuntimeError in Python 3.
        for wrep in list(self._workers.values()):
            if not wrep.is_alive():
                self.on_log_event('worker.connection_timeout', "Worker connection timeout for service '%s'." % wrep.service)
                self.unregister_worker(wrep.id)
        return

    def client_response(self, return_addresses, service, msg, partial=False, exception=False, error=False):
        """Package and send reply to client.

        :param return_addresses:       return address stack
        :type return_addresses:        list of str
        :param service:  name of service
        :type service:   str
        :param msg:      message parts
        :type msg:       list of str | str

        :rtype: None
        """
        to_send = return_addresses[:]
        if error:
            t = b'\x04'
        elif exception:
            t = b'\x06'
        elif partial:
            t = b'\x02'
        else:
            t = b'\x03'
        to_send.extend([b'', MDP_WORKER_VERSION, t, service])
        if isinstance(msg, list):
            to_send.extend(msg)
        else:
            to_send.append(msg)
        self.stream.send_multipart(to_send)
        return

    def client_response_pack(self, return_addresses, service, msg, partial=False):
        """Send message to client and pack it (msg) in msgpack format

        Exception and error messages are not allowed here.

        :param return_addresses:       return address stack
        :type return_addresses:        list of str
        :param service:  name of service
        :type service:   str
        :param msg:      message to pack and send
        :type msg:       mixed
        :param partial:  if message is partial of final, default False
        :type partial:   bool

        :rtype: None
        """
        packed = msgpack.Packer().pack(msg)
        self.client_response(return_addresses, service, packed, partial=partial)

    def unregister_worker(self, wid):
        """Unregister the worker with the given id.

        If the worker id is not registered, nothing happens.

        Will stop all timers for the worker.

        :param wid:    the worker id.
        :type wid:     str

        :rtype: None
        """
        try:
            wrep = self._workers[wid]
        except KeyError:
            # not registered, ignore
            return
        wrep.shutdown()
        service = wrep.service

        # remove worker from service list.  Rebuild the list instead of
        # calling list.remove() while iterating over it, which skips the
        # element following each removal.
        if service in self._services:
            remaining = [w for w in self._services[service] if w.id != wid]
            if remaining:
                self._services[service] = remaining
            else:
                del self._services[service]

        # remove worker from multicasts (iterate over a filtered copy)
        for m_name in self._multicasts:
            mw = self._multicasts[m_name]
            for w in [w for w in mw if w.id == wid]:
                mw.remove(w)

        # delete empty rows
        empty_keys = [k for k, v in self._multicasts.items() if len(v) == 0]
        for k in empty_keys:
            del self._multicasts[k]

        del self._workers[wid]
        self.on_log_event('worker.unregister', "Worker for service '%s' disconnected." % service)
        return

    def register_worker(self, wid, service):
        """Register the worker id and add it to the given service.

        Does nothing if worker is already known.

        :param wid:    the worker id.
        :type wid:     str
        :param service:    the service name.
        :type service:     str

        :rtype: None
        """
        if wid in self._workers:
            return
        # WorkerRep's signature is (proto, wid, service, stream); the
        # protocol id was previously omitted, shifting every argument.
        worker = WorkerRep(MDP_WORKER_VERSION, wid, service, self.stream)
        self._workers[wid] = worker

        if service in self._services:
            s = self._services[service]
            s.append(worker)
        else:
            self._services[service] = [worker]
        self.on_log_event('worker.register', "Worker for service '%s' is connected." % service)
        return

    def register_multicast(self, wid, multicast_name):
        """Add worker to multicast group

        :type wid:       str
        :param wid:      the worker id.
        :type multicast_name:  str
        :param multicast_name: group name
        """
        if wid not in self._workers:
            return
        worker = self._workers[wid]
        if multicast_name in self._multicasts:
            m = self._multicasts[multicast_name]
            m.append(worker)
        else:
            self._multicasts[multicast_name] = [worker]
        self.on_log_event('worker.register_multicast', "Service '%s' added to multicast group '%s'." % (worker.service, multicast_name))
        return

    def shutdown(self):
        """Shutdown broker.

        Will unregister all workers, stop all timers and ignore all further
        messages.

        .. warning:: The instance MUST not be used after :func:`shutdown` has been called.

        :rtype: None
        """
        self.stream.on_recv(None)
        self.stream.socket.setsockopt(zmq.LINGER, 0)
        self.stream.socket.close()
        self.stream.close()
        self.stream = None

        self._workers = {}
        self._services = {}
        self._multicasts = {}
        return

    def on_log_event(self, event, message):
        """Override this method if you want to log events from broker

        :type event:    str
        :param event:   event type - used for filtering
        :type message:  str
        :param message: log message

        :rtype: None
        """
        pass

    # helpers:
    def split_address(self, msg):
        """Function to split return Id and message received by ROUTER socket.

        Returns 2-tuple with return Id and remaining message parts.
        Empty frames after the Id are stripped.
        An empty `msg` yields ([], []) instead of raising NameError.
        """
        ret_ids = []
        rest_start = 0
        for i, part in enumerate(msg):
            rest_start = i + 1
            if part:
                ret_ids.append(part)
            else:
                break
        return ret_ids, msg[rest_start:]
Esempio n. 30
0
class LogWatch_HandleStats(PgLogForwardPlugin):
    """Aggregate per-client usage statistics from forwarded Postgres log lines.

    Collects call counts and durations keyed by (database, username,
    remotehost, action) and periodically flushes them to a stats queue.
    """
    LOG_FORMATS = ['netstr']

    def init(self, log_fmt):
        """Read configuration and start the periodic stat-dump timer."""
        super(LogWatch_HandleStats, self).init(log_fmt)

        # depends on pg_settings.log_function_calls
        self.parse_statements = self.cf.getbool('parse_statements', True)

        self.msg_suffix = self.cf.get('msg-suffix', 'confdb')
        if self.msg_suffix and not is_msg_req_valid(self.msg_suffix):
            self.log.error("invalid msg-suffix: %s", self.msg_suffix)
            self.msg_suffix = None

        self.hostname = socket.gethostname()
        self.stat_queue_name = self.cf.get('stat_queue_name', '')
        # getint, not get: this value is compared numerically against a dict
        # size in _update_stats(); cf.get would hand back a string.
        self.max_stat_items = self.cf.getint('max_stat_items', 10000)
        self.stat_dump_interval = self.cf.getint('stat_interval', 3600)
        self.last_stat_dump = time.time()
        # (database, username, remotehost, action) -> ClientStats
        self.client_stats = {}

        # PeriodicCallback expects milliseconds
        self.timer = PeriodicCallback(self.save_stats,
                                      self.stat_dump_interval * 1000)
        self.timer.start()

    def process_netstr(self, data):
        """
        Process contents of collected log chunk.
        This might be a SQL statement or a connect/disconnect entry.
        """
        # without a stats queue there is nowhere to report to
        if not self.stat_queue_name:
            return

        # normalize unix-socket connections to a routable address
        if data['remotehost'] == "[local]":
            data['remotehost'] = "127.0.0.1"

        action = None
        action_duration = 0
        statement_duration = 0
        call_count = 0

        if data['message'].startswith("connection authorized:"):
            action = "connect"
        elif data['message'].startswith("disconnection"):
            action = "disconnect"
            m = rc_disconnect.match(data['message'])
            if m:
                # session length "h:m:s.frac" -> milliseconds
                action_duration = (int(m.group('hours')) * 3600 +
                                   int(m.group('minutes')) * 60 +
                                   float(m.group('seconds'))) * 1000
        elif not self.parse_statements:
            # we have function logging enabled, see if we can use it
            m = rc_logged_func.search(data['message'])
            if m:
                # a logged function call, definitely prefer this to parsing
                action = m.group('func_name')
                action_duration = float(m.group('time')) / 1000
                call_count = int(m.group('calls'))
        if not action:
            # we have to parse function call
            m = rc_sql.search(data['message'])
            if m:
                if self.parse_statements:
                    # attempt to parse the function name and parameters
                    #action = self.get_sql_action (m.group('sql'))
                    call_count = 1
                # count the overall statement duration
                action_duration = float(m.group('duration'))
                statement_duration = action_duration

        self._update_stats(data, action, action_duration, call_count)
        # always accumulate the aggregate "SQL statements" bucket as well
        self._update_stats(data, "SQL statements", statement_duration,
                           call_count)

    def _update_stats(self, data, action, duration, call_count):
        """Fold one observation into client_stats; no-op when action is falsy."""
        if action:
            key = (data['database'], data['username'], data['remotehost'],
                   action)
            cs = self.client_stats.get(key)
            if cs:
                cs.update(duration, call_count)
            elif len(self.client_stats) > self.max_stat_items:
                # refuse to grow without bound; log and drop the observation
                self.log.error("Max stat items exceeded: %i",
                               self.max_stat_items)
            else:
                cs = ClientStats(data['database'], data['username'],
                                 data['remotehost'], action, duration,
                                 call_count)
                self.client_stats[key] = cs

    def save_stats(self):
        """
        Dump client stats to database.  Scheduled to be called periodically.
        """

        # do not send stats if stats is missing or stats queue is missing
        if not self.client_stats or not self.stat_queue_name:
            return

        now = time.time()
        time_passed = now - self.last_stat_dump
        self.log.info("Sending usage stats to repository [%i]",
                      len(self.client_stats))

        # post role usage
        usage = []
        for client in self.client_stats.values():
            self.log.trace("client: %s", client)
            usage.append(client.to_dict())

        params = skytools.db_urlencode(
            dict(hostname=self.hostname,
                 sample_length='%d seconds' % time_passed,
                 snap_time=datetime.datetime.now().isoformat()))
        confdb_funcargs = ('username=discovery', params,
                           skytools.make_record_array(usage))

        funcargs = [
            None, self.stat_queue_name, 'dba.set_role_usage',
            skytools.db_urlencode(dict(enumerate(confdb_funcargs)))
        ]

        msg = DatabaseMessage(function='pgq.insert_event',
                              params=cc.json.dumps(funcargs))
        if self.msg_suffix:
            msg.req += '.' + self.msg_suffix
        self.main.ccpublish(msg)

        # start a fresh accounting window
        self.client_stats = {}
        self.last_stat_dump = now

    def stop(self):
        """Stop the periodic stat-dump timer."""
        self.timer.stop()
Esempio n. 31
0
class Shell(PromptSession):
    """Interactive prompt for vexbot: routes typed text to commands,
    services, authors, or the NLP pipeline over the messaging layer.
    """

    def __init__(self, history_filepath=None):
        self._logger = logging.getLogger(__name__)
        if history_filepath is None:
            self._logger.info(' getting default history filepath.')
            history_filepath = _get_default_history_filepath()

        self._logger.info(' history filepath %s', history_filepath)
        self.history = FileHistory(history_filepath)
        self.messaging = _Messaging('shell', run_control_loop=True)

        # TODO: Cleanup this API access
        self.messaging._heartbeat_reciever.identity_callback = self._identity_callback
        self._thread = _Thread(target=self.messaging.start, daemon=True)

        # refresh bot-liveness indicator once per second
        self._bot_status_monitor = PeriodicCallback(self._monitor_bot_state,
                                                    1000)

        self.shebangs = [
            '!',
        ]
        self.command_observer = CommandObserver(self.messaging, prompt=self)
        commands = self.command_observer._get_commands()
        self._word_completer = WordCompleter(commands, WORD=_WORD)
        self._services_completer = ServiceCompleter(self._word_completer)
        self.author_interface = AuthorInterface(self._word_completer,
                                                self.messaging)

        self.service_interface = ServiceInterface(self._word_completer,
                                                  self.messaging)

        self.entity_interface = EntityInterface(self.author_interface,
                                                self.service_interface)

        self._bot_status = ''

        super().__init__(message='vexbot: ',
                         history=self.history,
                         completer=self._services_completer,
                         enable_system_prompt=True,
                         enable_suspend=True,
                         enable_open_in_editor=True,
                         complete_while_typing=False)

        self.print_observer = PrintObserver(self.app)

        self._print_subscription = self.messaging.chatter.subscribe(
            self.print_observer)
        self.messaging.chatter.subscribe(LogObserver())
        self.messaging.command.subscribe(self.command_observer)
        self.messaging.command.subscribe(
            ServicesObserver(self._identity_setter,
                             self._set_service_completion))

    def _identity_setter(self, services: list) -> None:
        """Record remote services and request their command lists."""
        try:
            services.remove(self.messaging._service_name)
        except ValueError:
            pass

        for service in services:
            self.service_interface.add_service(service)
            self.messaging.send_command('REMOTE',
                                        remote_command='commands',
                                        target=service,
                                        suppress=True)

    def _set_service_completion(self, service: str, commands: list) -> None:
        """Install a per-service completer built from its command list."""
        shebang = self.shebangs[0]
        commands = [shebang + command for command in commands]
        completer = WordCompleter(commands, WORD=_WORD)
        self._services_completer.set_service_completer(service, completer)

        # FIXME: `!commands` does not update these as it
        if service == 'vexbot':
            for command in commands:
                self._word_completer.words.add(command)

    def _identity_callback(self):
        """On (re)connect: ask the bot for its services and commands."""
        self.messaging.send_command('services', suppress=True)
        self.messaging.send_command('get_commands', suppress=True)

    def _monitor_bot_state(self):
        """Update the right-prompt bot indicator based on heartbeat age."""
        time_now = time.time()
        last_message = self.messaging._heartbeat_reciever._last_message_time
        # TODO: put in a countdown since last contact?
        delta_time = time_now - last_message
        # NOTE: Bot is set to send a heartbeat every 1.5 seconds
        if delta_time > 3.4:
            self._bot_status = '<NO BOT>'
        else:
            if self._bot_status == '':
                # nothing changed; avoid a needless redraw
                return
            self._bot_status = ''

        self.app.invalidate()

    def is_command(self, text: str) -> bool:
        """
        checks for presence of shebang in the first character of the text
        """
        # guard: '' would raise IndexError on text[0]; empty segments can
        # reach here via an '&&' chain such as "foo&&"
        if not text:
            return False
        if text[0] in self.shebangs:
            return True

        return False

    def run(self):
        """Main REPL: prompt, split on '&&', and dispatch each segment."""
        self._thread.start()
        self._bot_status_monitor.start()
        with patch_stdout(raw=True):
            while True:
                # Get our text
                try:
                    text = self.prompt(rprompt=self._get_rprompt_tokens)
                # KeyboardInterrupt continues
                except KeyboardInterrupt:
                    continue
                # End of file returns
                except EOFError:
                    return

                # Clean text
                text = text.lstrip()
                # Handle empty text
                if text == '':
                    self._logger.debug(' empty string found')
                    continue
                # Program specific handeling. Currently either first word
                # or second word can be commands
                for string in text.split('&&'):
                    self._handle_text(string)

    def _get_rprompt_tokens(self):
        """Right-prompt content: bot liveness indicator."""
        return self._bot_status

    def _handle_text(self, text: str):
        """
        Check to see if text is a command. Otherwise, check to see if the
        second word is a command.

        Commands get handeled by the `_handle_command` method

        If not command, check to see if first word is a service or an author.
        Default program is to send message replies. However, we will also
        check to see if the second word is a command and handle approparitly

        This method does simple string parsing and high level program control
        """
        # If first word is command
        if self.is_command(text):
            self._logger.debug(' first word is a command')
            # get the command, args, and kwargs out using `shlex`
            command, args, kwargs = _get_cmd_args_kwargs(text)
            self._logger.info(' command: %s, %s %s', command, args, kwargs)
            # hand off to the `handle_command` method
            result = self._handle_command(command, args, kwargs)

            if result:
                if isinstance(result, str):
                    print(result)
                else:
                    _pprint.pprint(result)
            # Exit the method here if in this block
            return
        # Else first word is not a command
        else:
            self._logger.debug(' first word is not a command')
            # get the first word and then the rest of the text.
            try:
                first_word, second_word = text.split(' ', 1)
                self._logger.debug(' first word: %s', first_word)
            except ValueError:
                self._logger.debug('No second word in chain!')
                return self._handle_NLP(text)
            # check if second word/string is a command
            if self.is_command(second_word):
                self._logger.info(' second word is a command')
                # get the command, args, and kwargs out using `shlex`
                command, args, kwargs = _get_cmd_args_kwargs(second_word)
                self._logger.debug(' second word: %s', command)
                self._logger.debug(' command %s', command)
                self._logger.debug('args %s ', args)
                self._logger.debug('kwargs %s', kwargs)
                return self._first_word_not_cmd(first_word, command, args,
                                                kwargs)
            # if second word is not a command, default to NLP
            else:
                self._logger.info(' defaulting to message since second word '
                                  'isn\'t a command')

                return self._handle_NLP(text)

    def _handle_NLP(self, text: str):
        """Send free text to the NLP pipeline with extracted entities."""
        entities = self.entity_interface.get_entities(text)
        self.messaging.send_command('NLP', text=text, entities=entities)

    def _handle_command(self, command: str, args: tuple, kwargs: dict):
        """Run a local command, or forward it to the bot when unknown/remote."""
        if kwargs.get('remote', False):
            self._logger.debug(' `remote` in kwargs, sending command')
            self.messaging.send_command(command, *args, **kwargs)
            return
        if self.command_observer.is_command(command):
            self._logger.debug(' `%s` is a command', command)
            return self.command_observer.handle_command(
                command, *args, **kwargs)
        else:
            self._logger.debug('command not found! Sending to bot')
            self.messaging.send_command(command, *args, **kwargs)

    def _first_word_not_cmd(self, first_word: str, command: str, args: tuple,
                            kwargs: dict) -> None:
        """
        check to see if this is an author or service.
        This method does high level control handling
        """
        if self.service_interface.is_service(first_word):
            self._logger.debug(' first word is a service')
            kwargs = self.service_interface.get_metadata(first_word, kwargs)
            self._logger.debug(' service transform kwargs: %s', kwargs)
        elif self.author_interface.is_author(first_word):
            self._logger.debug(' first word is an author')
            kwargs = self.author_interface.get_metadata(first_word, kwargs)
            self._logger.debug(' author transform kwargs: %s', kwargs)
        if not kwargs.get('remote'):
            # wrap as a REMOTE command targeting the resolved destination
            kwargs['remote_command'] = command
            command = 'REMOTE'
            self.messaging.send_command(command, *args, **kwargs)
            return
        else:
            self.messaging.send_command(command, *args, **kwargs)
Esempio n. 32
0
class BinaryStar(object):
    """Binary Star reactor: a primary/backup pair with automatic failover.

    Each peer publishes its state to the other over a PUB/SUB socket pair
    and runs a finite state machine (:meth:`execute_fsm`) that decides
    whether this node is active (master) or passive (slave), and whether
    client requests may currently be accepted.
    """
    ctx = None              # Our private context
    loop = None             # Reactor loop
    statepub = None         # State publisher
    statesub = None         # State subscriber
    state = None            # Current state
    event = None            # Current event
    peer_expiry = 0         # When peer is considered 'dead'
    voter_callback = None   # Voting socket handler
    master_callback = None  # Call when become master
    slave_callback = None   # Call when become slave
    heartbeat = None        # PeriodicCallback publishing our state every HEARTBEAT ms

    def __init__(self, primary, local, remote):
        """Initialize the Binary Star.

        :param primary: True if this node starts as the primary peer,
                        False if it is the backup.
        :param local:   endpoint the state publisher binds to.
        :param remote:  endpoint of the peer's state publisher.
        """
        self.ctx = zmq.Context()
        self.loop = IOLoop.instance()
        self.state = STATE_PRIMARY if primary else STATE_BACKUP
        
        # Create publisher for state going to peer
        self.statepub = self.ctx.socket(zmq.PUB)
        self.statepub.bind(local)
        
        # Create subscriber for state coming from peer
        self.statesub = self.ctx.socket(zmq.SUB)
        self.statesub.setsockopt(zmq.SUBSCRIBE, '')  # subscribe to everything
        self.statesub.connect(remote)
        
        # wrap statesub in ZMQStream for event triggers
        self.statesub = ZMQStream(self.statesub, self.loop)
        
        # setup basic reactor events
        # NOTE: HEARTBEAT is the publishing period in milliseconds
        # (see update_peer_expiry's 2e-3 conversion)
        self.heartbeat = PeriodicCallback(self.send_state, HEARTBEAT, self.loop)
        self.statesub.on_recv(self.recv_state)
        
        # setup log formatter -- left unimplemented

    def update_peer_expiry(self):
        """Update peer expiry time to be 2 heartbeats from now."""
        # time.time() is in seconds, HEARTBEAT in ms, so 2e-3 * HEARTBEAT
        # is two heartbeat intervals expressed in seconds.
        self.peer_expiry = time.time() + 2e-3 * HEARTBEAT

    def start(self):
        """Start heartbeating and enter the reactor loop (blocks)."""
        self.update_peer_expiry()
        self.heartbeat.start()
        return self.loop.start()
    
    def execute_fsm(self):
        """Binary Star finite state machine (applies event to state)
        
        returns True if connections should be accepted, False otherwise.
        """
        accept = True
        if (self.state == STATE_PRIMARY):
            # Primary server is waiting for peer to connect
            # Accepts CLIENT_REQUEST events in this state
            if (self.event == PEER_BACKUP):
                print ("I: connected to backup (slave), ready as master")
                self.state = STATE_ACTIVE
                if (self.master_callback):
                    self.loop.add_callback(self.master_callback)
            elif (self.event == PEER_ACTIVE):
                print ("I: connected to backup (master), ready as slave")
                self.state = STATE_PASSIVE
                if (self.slave_callback):
                    self.loop.add_callback(self.slave_callback)
            elif (self.event == CLIENT_REQUEST):
                if (time.time () >= self.peer_expiry):
                    print ("I: request from client, ready as master")
                    self.state = STATE_ACTIVE
                    if (self.master_callback):
                        self.loop.add_callback(self.master_callback)
                else:
                    # don't respond to clients yet - we don't know if
                    # the backup is currently Active as a result of
                    # a successful failover
                    accept = False
        elif (self.state == STATE_BACKUP):
            # Backup server is waiting for peer to connect
            # Rejects CLIENT_REQUEST events in this state
            if (self.event == PEER_ACTIVE):
                print ("I: connected to primary (master), ready as slave")
                self.state = STATE_PASSIVE
                if (self.slave_callback):
                    self.loop.add_callback(self.slave_callback)
            elif (self.event == CLIENT_REQUEST):
                accept = False
        elif (self.state == STATE_ACTIVE):
            # Server is active
            # Accepts CLIENT_REQUEST events in this state
            # The only way out of ACTIVE is death
            if (self.event == PEER_ACTIVE):
                # Two masters would mean split-brain
                print ("E: fatal error - dual masters, aborting")
                raise FSMError("Dual Masters")
        elif (self.state == STATE_PASSIVE):
            # Server is passive
            # CLIENT_REQUEST events can trigger failover if peer looks dead
            if (self.event == PEER_PRIMARY):
                # Peer is restarting - become active, peer will go passive
                print ("I: primary (slave) is restarting, ready as master")
                self.state = STATE_ACTIVE
            elif (self.event == PEER_BACKUP):
                # Peer is restarting - become active, peer will go passive
                print ("I: backup (slave) is restarting, ready as master")
                self.state = STATE_ACTIVE
            elif (self.event == PEER_PASSIVE):
                # Two passives would mean cluster would be non-responsive
                print ("E: fatal error - dual slaves, aborting")
                raise FSMError("Dual slaves")
            elif (self.event == CLIENT_REQUEST):
                # Peer becomes master if timeout has passed
                # It's the client request that triggers the failover
                assert (self.peer_expiry > 0)
                if (time.time () >= self.peer_expiry):
                    # If peer is dead, switch to the active state
                    print ("I: failover successful, ready as master")
                    self.state = STATE_ACTIVE
                else:
                    # If peer is alive, reject connections
                    accept = False
            # Call state change handler if necessary
            if (self.state == STATE_ACTIVE and self.master_callback):
                self.loop.add_callback(self.master_callback)
        return accept


    # ---------------------------------------------------------------------
    # Reactor event handlers...

    def send_state (self):
        """Publish our state to peer"""
        self.statepub.send("%d" % self.state)

    def recv_state (self, msg):
        """Receive state from peer, execute finite state machine"""
        state = msg[0]
        if state:
            # peer state doubles as the FSM event; also refresh liveness
            self.event = int(state)
            self.update_peer_expiry()
        self.execute_fsm()

    def voter_ready(self, msg):
        """Application wants to speak to us, see if it's possible"""
        # If server can accept input now, call appl handler
        self.event = CLIENT_REQUEST
        if self.execute_fsm():
            print "CLIENT REQUEST"
            self.voter_callback(self.voter_socket, msg)
        else:
            # Message will be ignored
            pass
    
    # -------------------------------------------------------------------------
    #
    
    def register_voter(self, endpoint, type, handler):
        """Create socket, bind to local endpoint, and register as reader for
        voting. The socket will only be available if the Binary Star state
        machine allows it. Input on the socket will act as a "vote" in the
        Binary Star scheme.  We require exactly one voter per bstar instance.
        
        handler will always be called with two arguments: (socket,msg)
        where socket is the one we are creating here, and msg is the message
        that triggered the POLLIN event.
        """
        assert self.voter_callback is None
        
        socket = self.ctx.socket(type)
        socket.bind(endpoint)
        self.voter_socket = socket
        self.voter_callback = handler
        
        stream = ZMQStream(socket, self.loop)
        stream.on_recv(self.voter_ready)
Esempio n. 33
0
class CryptoTransportLayer(TransportLayer):
    """Encrypted transport layer for the OpenBazaar P2P network.

    Wires together the DHT, the crypto peer listener and (optionally) a
    Bitmessage XML-RPC connection, and persists node settings and keys in
    the local database. Python 2 code (print statements, xmlrpclib).
    """

    def __init__(self, ob_ctx, db):
        """Set up logging, settings, DHT and the peer listener.

        :param ob_ctx: runtime context object (market id, server ip/port,
                       bitmessage credentials, feature flags).
        :param db:     database accessor used for settings and peers.
        """
        self.ob_ctx = ob_ctx

        self.log = logging.getLogger(
            '[%s] %s' % (ob_ctx.market_id, self.__class__.__name__)
        )
        # quiet the chatty "requests" library down to warnings
        requests_log = logging.getLogger("requests")
        requests_log.setLevel(logging.WARNING)

        self.db = db

        self.bitmessage_api = None
        # (None, None, -1) is the sentinel for "no bitmessage configured"
        if (ob_ctx.bm_user, ob_ctx.bm_pass, ob_ctx.bm_port) != (None, None, -1):
            if not self._connect_to_bitmessage():
                self.log.info('Bitmessage not installed or started')

        self.market_id = ob_ctx.market_id
        self.nick_mapping = {}
        self.uri = network_util.get_peer_url(ob_ctx.server_ip, ob_ctx.server_port)
        self.ip = ob_ctx.server_ip
        self.nickname = ""
        self.dev_mode = ob_ctx.dev_mode

        # message types this layer registers callbacks for (start_listener)
        self.all_messages = (
            'hello',
            'findNode',
            'findNodeResponse',
            'store'
        )

        # _setup_settings() must run first: it loads/creates guid, keys
        # and nickname used below
        self._setup_settings()
        ob_ctx.market_id = self.market_id
        self.dht = DHT(self, self.market_id, self.settings, self.db)
        TransportLayer.__init__(self, ob_ctx, self.guid, self.nickname)
        self.start_listener()

        if ob_ctx.enable_ip_checker and not ob_ctx.seed_mode and not ob_ctx.dev_mode:
            self.start_ip_address_checker()

    def start_listener(self):
        """Register per-message callbacks and start the peer listener."""
        # each message type gets an on_<msg> handler and a
        # validate_on_<msg> validator, both defined on this class
        self.add_callbacks([
            (
                msg,
                {
                    'cb': getattr(self, 'on_%s' % msg),
                    'validator_cb': getattr(self, 'validate_on_%s' % msg)
                }
            )
            for msg in self.all_messages
        ])

        self.listener = connection.CryptoPeerListener(
            self.ip, self.port, self.pubkey, self.secret, self.ctx,
            self.guid,
            self._on_message
        )

        self.listener.set_ok_msg({
            'type': 'ok',
            'senderGUID': self.guid,
            'pubkey': self.pubkey,
            'senderNick': self.nickname
        })
        self.listener.listen()

    def start_ip_address_checker(self):
        '''Checks for possible public IP change'''
        if self.ob_ctx.enable_ip_checker:
            # period is in milliseconds: check every 5 seconds
            self.caller = PeriodicCallback(self._ip_updater_periodic_callback, 5000, ioloop.IOLoop.instance())
            self.caller.start()
            self.log.info("IP_CHECKER_ENABLED: Periodic IP Address Checker started.")

    def _ip_updater_periodic_callback(self):
        """Periodic check: if our public IP changed, update listener and DHT."""
        if self.ob_ctx.enable_ip_checker:
            new_ip = network_util.get_my_ip()

            # nothing to do if lookup failed or the IP is unchanged
            if not new_ip or new_ip == self.ip:
                return

            self.ob_ctx.server_ip = new_ip
            self.ip = new_ip

            if self.listener is not None:
                self.listener.set_ip_address(new_ip)

            # re-announce ourselves so peers learn the new address
            self.dht._iterativeFind(self.guid, [], 'findNode')

    def save_peer_to_db(self, peer_tuple):
        """Persist a (uri, pubkey, guid, nickname) peer tuple, replacing
        any previous row matching the same uri or guid."""
        uri = peer_tuple[0]
        pubkey = peer_tuple[1]
        guid = peer_tuple[2]
        nickname = peer_tuple[3]

        # Update query
        self.db.deleteEntries("peers", {"uri": uri, "guid": guid}, "OR")
        if guid is not None:
            self.db.insertEntry("peers", {
                "uri": uri,
                "pubkey": pubkey,
                "guid": guid,
                "nickname": nickname,
                "market_id": self.market_id
            })

    def _connect_to_bitmessage(self):
        """Try to reach a local Bitmessage instance over XML-RPC.

        Returns a truthy value (the result of a trivial API call) on
        success, False otherwise; sets self.bitmessage_api accordingly.
        """
        # Get bitmessage going
        # First, try to find a local instance
        result = False
        bm_user = self.ob_ctx.bm_user
        bm_pass = self.ob_ctx.bm_pass
        bm_port = self.ob_ctx.bm_port
        try:
            self.log.info(
                '[_connect_to_bitmessage] Connecting to Bitmessage on port %s',
                bm_port
            )
            self.bitmessage_api = xmlrpclib.ServerProxy(
                "http://{}:{}@localhost:{}/".format(bm_user, bm_pass, bm_port),
                verbose=0
            )
            # trivial add() call doubles as a liveness probe
            result = self.bitmessage_api.add(2, 3)
            self.log.info(
                "[_connect_to_bitmessage] Bitmessage API is live: %s",
                result
            )
        # If we failed, fall back to starting our own
        except Exception as e:
            self.log.info("Failed to connect to bitmessage instance: %s", e)
            self.bitmessage_api = None
        return result

    def validate_on_hello(self, msg):
        """Validator for 'hello' messages (currently accepts everything)."""
        self.log.debug('Validating ping message.')
        return True

    def on_hello(self, msg):
        """Handler for 'hello' messages: just log them."""
        self.log.info('Pinged %s', json.dumps(msg, ensure_ascii=False))

    def validate_on_store(self, msg):
        """Validator for 'store' messages (currently accepts everything)."""
        self.log.debug('Validating store value message.')
        return True

    def on_store(self, msg):
        """Handler for 'store': delegate to the DHT."""
        self.dht._on_storeValue(msg)

    def validate_on_findNode(self, msg):
        """Validator for 'findNode' messages (currently accepts everything)."""
        self.log.debug('Validating find node message.')
        return True

    def on_findNode(self, msg):
        """Handler for 'findNode': delegate to the DHT."""
        self.dht.on_find_node(msg)

    def validate_on_findNodeResponse(self, msg):
        """Validator for 'findNodeResponse' (currently accepts everything)."""
        self.log.debug('Validating find node response message.')
        return True

    def on_findNodeResponse(self, msg):
        """Handler for 'findNodeResponse': delegate to the DHT."""
        self.dht.on_findNodeResponse(self, msg)

    def _setup_settings(self):
        """Load node settings from the DB, creating defaults, PGP keypair,
        Bitcoin keypair, nickname and Bitmessage address as needed."""
        try:
            self.settings = self.db.selectEntries("settings", {"market_id": self.market_id})
        except (OperationalError, DatabaseError) as e:
            print e
            raise SystemExit("database file %s corrupt or empty - cannot continue" % self.db.db_path)

        if len(self.settings) == 0:
            # first run: seed a default settings row
            self.settings = {"market_id": self.market_id, "welcome": "enable"}
            self.db.insertEntry("settings", self.settings)
        else:
            self.settings = self.settings[0]

        # Generate PGP key during initial setup or if previous PGP gen failed
        if not self.settings.get('PGPPubKey'):
            try:
                self.log.info('Generating PGP keypair. This may take several minutes...')
                print 'Generating PGP keypair. This may take several minutes...'
                gpg = gnupg.GPG()
                input_data = gpg.gen_key_input(key_type="RSA",
                                               key_length=2048,
                                               name_email='*****@*****.**',
                                               name_comment="Autogenerated by Open Bazaar",
                                               passphrase="P@ssw0rd")
                assert input_data is not None
                key = gpg.gen_key(input_data)
                assert key is not None

                pubkey_text = gpg.export_keys(key.fingerprint)
                newsettings = {"PGPPubKey": pubkey_text, "PGPPubkeyFingerprint": key.fingerprint}
                self.db.updateEntries("settings", newsettings, {"market_id": self.market_id})
                self.settings.update(newsettings)

                self.log.info('PGP keypair generated.')
            except Exception as e:
                sys.exit("Encountered a problem with GPG: %s" % e)

        if not self.settings.get('pubkey'):
            # Generate Bitcoin keypair
            self._generate_new_keypair()

        if not self.settings.get('nickname'):
            newsettings = {'nickname': 'Default'}
            self.db.updateEntries('settings', newsettings, {"market_id": self.market_id})
            self.settings.update(newsettings)

        self.nickname = self.settings.get('nickname', '')
        self.secret = self.settings.get('secret', '')
        self.pubkey = self.settings.get('pubkey', '')
        self.privkey = self.settings.get('privkey')
        self.btc_pubkey = privkey_to_pubkey(self.privkey)
        self.guid = self.settings.get('guid', '')
        self.sin = self.settings.get('sin', '')
        self.bitmessage = self.settings.get('bitmessage', '')

        if not self.settings.get('bitmessage'):
            # Generate Bitmessage address
            if self.bitmessage_api is not None:
                self._generate_new_bitmessage_address()

        self.cryptor = Cryptor(pubkey_hex=self.pubkey, privkey_hex=self.secret)

        # In case user wants to override with command line passed bitmessage values
        if self.ob_ctx.bm_user is not None and \
           self.ob_ctx.bm_pass is not None and \
           self.ob_ctx.bm_port is not None:
            self._connect_to_bitmessage()

    def _generate_new_keypair(self):
        """Generate node secret, Bitcoin keypair, GUID and SIN, and persist
        them in the settings table."""
        # NOTE(review): random.randrange is not a cryptographically secure
        # source for key material -- confirm whether this is acceptable here
        secret = str(random.randrange(2 ** 256))
        self.secret = hashlib.sha256(secret).hexdigest()
        self.pubkey = privkey_to_pubkey(self.secret)
        self.privkey = random_key()
        self.btc_pubkey = privkey_to_pubkey(self.privkey)
        print 'PUBLIC KEY: ', self.btc_pubkey

        # Generate SIN
        # GUID = RIPEMD160(SHA256(pubkey))
        sha_hash = hashlib.sha256()
        sha_hash.update(self.pubkey)
        ripe_hash = hashlib.new('ripemd160')
        ripe_hash.update(sha_hash.digest())

        self.guid = ripe_hash.hexdigest()
        self.sin = obelisk.EncodeBase58Check('\x0F\x02%s' % ripe_hash.digest())

        newsettings = {
            "secret": self.secret,
            "pubkey": self.pubkey,
            "privkey": self.privkey,
            "guid": self.guid,
            "sin": self.sin
        }
        self.db.updateEntries("settings", newsettings, {"market_id": self.market_id})
        self.settings.update(newsettings)

    def _generate_new_bitmessage_address(self):
        """Ask the Bitmessage API for a fresh random address and persist it."""
        # Use the guid generated previously as the key
        self.bitmessage = self.bitmessage_api.createRandomAddress(
            self.guid.encode('base64'),
            False,
            1.05,
            1.1111
        )
        newsettings = {"bitmessage": self.bitmessage}
        self.db.updateEntries("settings", newsettings, {"market_id": self.market_id})
        self.settings.update(newsettings)

    def join_network(self, seeds=None, callback=None):
        """Connect to seed servers and persisted peers, then search for our
        own node to populate the routing table.

        :param seeds:    iterable of seed hosts (port 12345 is assumed).
        :param callback: called with 'Joined' once the join was initiated.
        """
        if seeds is None:
            seeds = []

        self.log.info('Joining network')

        # Connect up through seed servers
        for idx, seed in enumerate(seeds):
            seeds[idx] = network_util.get_peer_url(seed, "12345")

        # Connect to persisted peers
        db_peers = self.get_past_peers()

        known_peers = list(set(seeds).union(db_peers))

        for known_peer in known_peers:
            self.dht.add_peer(self, known_peer)

        # Populate routing table by searching for self
        if known_peers:
            # Check every one second if we are connected
            # We could use a PeriodicCallback but I think this is simpler
            # since this will be repeated in most cases less than 10 times
            def join_callback():
                # If we are not connected to any node, reschedule a check
                if not self.dht.activePeers:
                    ioloop.IOLoop.instance().call_later(1, join_callback)
                else:
                    self.search_for_my_node()
            join_callback()

        if callback is not None:
            callback('Joined')

    def get_past_peers(self):
        """Return the URIs of peers previously stored for this market."""
        result = self.db.selectEntries("peers", {"market_id": self.market_id})
        return [peer['uri'] for peer in result]

    def search_for_my_node(self):
        """Run a DHT lookup for our own GUID to populate the routing table."""
        self.log.info('Searching for myself')
        self.dht._iterativeFind(self.guid, self.dht.knownNodes, 'findNode')

    def get_crypto_peer(self, guid=None, uri=None, pubkey=None, nickname=None):
        """Build a CryptoPeerConnection for a remote peer.

        Returns None (and logs an error) when asked for our own GUID.
        """
        if guid == self.guid:
            self.log.error('Cannot get CryptoPeerConnection for your own node')
            return

        self.log.debug(
            'Getting CryptoPeerConnection'
            '\nGUID: %s'
            '\nURI: %s'
            '\nPubkey:%s'
            '\nNickname:%s',
            guid, uri, pubkey, nickname
        )

        return connection.CryptoPeerConnection(
            self, uri, pubkey, guid=guid, nickname=nickname
        )

    def respond_pubkey_if_mine(self, nickname, ident_pubkey):
        """If ident_pubkey is ours, sign our pubkey and send the response."""

        if ident_pubkey != self.pubkey:
            self.log.info("Public key does not match your identity")
            return

        # Return signed pubkey
        pubkey = self.cryptor.pubkey  # XXX: A Cryptor does not have such a field.
        ec_key = obelisk.EllipticCurveKey()
        ec_key.set_secret(self.secret)
        digest = obelisk.Hash(pubkey)
        signature = ec_key.sign(digest)

        # Send array of nickname, pubkey, signature to transport layer
        self.send(proto_response_pubkey(nickname, pubkey, signature))

    def send(self, data, send_to=None, callback=None):
        """Send data to one peer (send_to GUID) or broadcast to all active
        peers, stamping our GUID/pubkey on broadcasts."""

        self.log.debug("Outgoing Data: %s %s", data, send_to)

        # Directed message
        if send_to is not None:

            # prefer routing table; fall back to the active-peer list
            peer = self.dht.routingTable.getContact(send_to)
            if peer is None:
                for activePeer in self.dht.activePeers:
                    if activePeer.guid == send_to:
                        peer = activePeer
                        break

            if peer:
                self.log.debug('Directed Data (%s): %s', send_to, data)
                try:
                    peer.send(data, callback=callback)
                except Exception as e:
                    self.log.error('Not sending message directly to peer %s', e)
            else:
                self.log.error('No peer found')

        else:
            # FindKey and then send

            for peer in self.dht.activePeers:
                try:
                    routing_peer = self.dht.routingTable.getContact(peer.guid)

                    if routing_peer is None:
                        self.dht.routingTable.addContact(peer)
                        routing_peer = peer

                    data['senderGUID'] = self.guid
                    data['pubkey'] = self.pubkey

                    def cb(msg):
                        self.log.debug('Message Back: \n%s', pformat(msg))

                    routing_peer.send(data, cb)

                except Exception:
                    self.log.info("Error sending over peer!")
                    traceback.print_exc()

    def _on_message(self, msg):
        """Record the sender as a peer and dispatch registered callbacks on
        a worker thread so the listener is not blocked."""

        # here goes the application callbacks
        # we get a "clean" msg which is a dict holding whatever

        pubkey = msg.get('pubkey')
        uri = msg.get('uri')
        guid = msg.get('senderGUID')
        # NOTE(review): raises TypeError if 'senderNick' is absent --
        # confirm every sender includes it
        nickname = msg.get('senderNick')[:120]

        self.log.info('On Message: %s', json.dumps(msg, ensure_ascii=False))
        self.dht.add_peer(self, uri, pubkey, guid, nickname)
        t = Thread(target=self.trigger_callbacks, args=(msg['type'], msg,))
        t.start()

    def store(self, *args, **kwargs):
        """
        Store or republish data.

        Refer to the dht module (iterativeStore()) for further details.
        """
        self.dht.iterativeStore(*args, **kwargs)

    def shutdown(self):
        """Best-effort shutdown: close the Bitmessage API proxy if open."""
        print "CryptoTransportLayer.shutdown()!"
        print "Notice: explicit DHT Shutdown not implemented."

        try:
            if self.bitmessage_api is not None:
                self.bitmessage_api.close()
        except Exception as e:
            # It might not even be open; we can't do much more on our
            # way out if exception is thrown here.
            self.log.error(
                "Could not shutdown bitmessage_api's ServerProxy: %s", e.message
            )
Esempio n. 34
0
File: proxy.py Progetto: postsql/cc
class ProxyHandler(BaseProxyHandler):
    """Simply proxies further.

    When the ``ping`` config option is set, the handler also sends
    periodic echo requests to the remote CC and evaluates the pongs to
    monitor connection health.
    """

    log = skytools.getLogger('h:ProxyHandler')

    # interval between pings, in seconds
    ping_tick = 1

    def __init__(self, hname, hcf, ccscript):
        super(ProxyHandler, self).__init__(hname, hcf, ccscript)

        self.ping_remote = self.cf.getbool("ping", False)
        if self.ping_remote:
            self.echo_stats = EchoState(self.remote_url)
            # PeriodicCallback takes its period in milliseconds
            self.echo_timer = PeriodicCallback(self.ping,
                                               self.ping_tick * 1000,
                                               self.ioloop)
            self.echo_timer.start()
            self.log.debug("will ping %s", self.remote_url)

    def on_recv(self, zmsg):
        """ Got message from remote CC, process it. """
        try:
            # pongs to our pings should come back w/o any routing info
            if self.ping_remote and zmsg[0] == '':
                self.log.trace("%r", zmsg)
                cmsg = CCMessage(zmsg)
                req = cmsg.get_dest()
                if req == "echo.response":
                    self._recv_pong(cmsg)
                else:
                    self.log.warn("unknown msg: %s", req)
        except Exception:
            # narrowed from a bare `except:`, which would also swallow
            # SystemExit/KeyboardInterrupt; pong handling stays best-effort
            self.log.exception("crashed")
        finally:
            # the message is always forwarded, whatever pong handling did
            super(ProxyHandler, self).on_recv(zmsg)

    def _recv_pong(self, cmsg):
        """ Pong received, evaluate it. """

        msg = cmsg.get_payload(self.xtx)
        if not msg: return

        if msg.orig_target != self.remote_url:
            self.log.warn("unknown pong: %s", msg.orig_target)
            return
        echo = self.echo_stats
        echo.update_pong(msg)

        # round-trip time measured against the ping's original timestamp
        rtt = echo.time_pong - msg.orig_time
        if msg.orig_time == echo.time_ping:
            self.log.trace("echo time: %f s (%s)", rtt, self.remote_url)
        elif rtt <= 5 * self.ping_tick:
            self.log.debug("late pong: %f s (%s)", rtt, self.remote_url)
        else:
            self.log.info("too late pong: %f s (%s)", rtt, self.remote_url)

    def _send_ping(self):
        """ Send ping to remote CC. """
        msg = EchoRequestMessage(target=self.remote_url)
        cmsg = self.xtx.create_cmsg(msg)
        self.stream.send_cmsg(cmsg)
        self.echo_stats.update_ping(msg)
        self.log.trace("%r", msg)

    def ping(self):
        """ Echo requesting and monitoring. """
        self.log.trace("")
        echo = self.echo_stats
        # warn if the last pong lags the last ping by more than 5 ticks
        if echo.time_ping - echo.time_pong > 5 * self.ping_tick:
            self.log.warn("no pong from %s for %f s", self.remote_url,
                          echo.time_ping - echo.time_pong)
        self._send_ping()

    def stop(self):
        """ Stop the handler and its ping timer, if one was started. """
        super(ProxyHandler, self).stop()
        self.log.info("stopping")
        # echo_timer only exists when ping_remote was configured
        if hasattr(self, "echo_timer"):
            self.echo_timer.stop()
Esempio n. 35
0
class CryptoTransportLayer(TransportLayer):
    """Encrypted peer-to-peer transport layer for a marketplace node.

    Ties together the persisted node settings (GUID, key material,
    nickname), the DHT, the peer listener, and an optional Bitmessage
    XML-RPC connection.

    NOTE(review): this class is Python 2 code (print statements,
    xmlrpclib) while other snippets in this file use Python 3 syntax.

    :param ob_ctx: runtime context (market id, server ip/port,
        bitmessage credentials, feature flags).
    :param db: persistence backend used for settings and known peers.
    """
    def __init__(self, ob_ctx, db):

        self.ob_ctx = ob_ctx

        self.log = logging.getLogger(
            '[%s] %s' % (ob_ctx.market_id, self.__class__.__name__))
        # Silence chatty INFO-level output from the requests library.
        requests_log = logging.getLogger("requests")
        requests_log.setLevel(logging.WARNING)

        self.db = db

        self.bitmessage_api = None
        # Only attempt a Bitmessage connection when at least one of the
        # credentials/port differs from the (None, None, -1) defaults.
        if (ob_ctx.bm_user, ob_ctx.bm_pass, ob_ctx.bm_port) != (None, None,
                                                                -1):
            if not self._connect_to_bitmessage():
                self.log.info('Bitmessage not installed or started')

        self.market_id = ob_ctx.market_id
        self.nick_mapping = {}
        self.uri = network_util.get_peer_url(ob_ctx.server_ip,
                                             ob_ctx.server_port)
        self.ip = ob_ctx.server_ip
        self.nickname = ""
        self.dev_mode = ob_ctx.dev_mode

        # Message types dispatched to the on_*/validate_on_* handlers.
        self.all_messages = ('hello', 'findNode', 'findNodeResponse', 'store')

        # Loads (or creates) settings; sets guid/keys/nickname on self.
        self._setup_settings()
        ob_ctx.market_id = self.market_id
        self.dht = DHT(self, self.market_id, self.settings, self.db)
        TransportLayer.__init__(self, ob_ctx, self.guid, self.nickname)
        self.start_listener()

        # Public-IP monitoring is skipped for seed and dev nodes.
        if ob_ctx.enable_ip_checker and not ob_ctx.seed_mode and not ob_ctx.dev_mode:
            self.start_ip_address_checker()

    def start_listener(self):
        """Register message callbacks and start the crypto peer listener."""
        self.add_callbacks([(msg, {
            'cb':
            getattr(self, 'on_%s' % msg),
            'validator_cb':
            getattr(self, 'validate_on_%s' % msg)
        }) for msg in self.all_messages])

        self.listener = connection.CryptoPeerListener(self.ip, self.port,
                                                      self.pubkey, self.secret,
                                                      self.ctx, self.guid,
                                                      self._on_message)

        self.listener.set_ok_msg({
            'type': 'ok',
            'senderGUID': self.guid,
            'pubkey': self.pubkey,
            'senderNick': self.nickname
        })
        self.listener.listen()

    def start_ip_address_checker(self):
        '''Checks for possible public IP change'''
        if self.ob_ctx.enable_ip_checker:
            # Poll every 5000 ms on the tornado/zmq IO loop.
            self.caller = PeriodicCallback(self._ip_updater_periodic_callback,
                                           5000, ioloop.IOLoop.instance())
            self.caller.start()
            self.log.info(
                "IP_CHECKER_ENABLED: Periodic IP Address Checker started.")

    def _ip_updater_periodic_callback(self):
        """Refresh our public IP; re-announce on the DHT when it changed."""
        if self.ob_ctx.enable_ip_checker:
            new_ip = network_util.get_my_ip()

            # Nothing to do when lookup failed or the IP is unchanged.
            if not new_ip or new_ip == self.ip:
                return

            self.ob_ctx.server_ip = new_ip
            self.ip = new_ip

            if self.listener is not None:
                self.listener.set_ip_address(new_ip)

            # Search for ourselves so peers learn the new address.
            self.dht._iterativeFind(self.guid, [], 'findNode')

    def save_peer_to_db(self, peer_tuple):
        """Upsert a peer record (uri, pubkey, guid, nickname) in the DB."""
        uri = peer_tuple[0]
        pubkey = peer_tuple[1]
        guid = peer_tuple[2]
        nickname = peer_tuple[3]

        # Update query
        self.db.deleteEntries("peers", {"uri": uri, "guid": guid}, "OR")
        if guid is not None:
            self.db.insertEntry(
                "peers", {
                    "uri": uri,
                    "pubkey": pubkey,
                    "guid": guid,
                    "nickname": nickname,
                    "market_id": self.market_id
                })

    def _connect_to_bitmessage(self):
        """Try to reach a local Bitmessage instance over XML-RPC.

        Returns the result of a trivial API call on success (truthy),
        False on failure; also resets ``self.bitmessage_api`` to None
        on failure.
        """
        # Get bitmessage going
        # First, try to find a local instance
        result = False
        bm_user = self.ob_ctx.bm_user
        bm_pass = self.ob_ctx.bm_pass
        bm_port = self.ob_ctx.bm_port
        try:
            self.log.info(
                '[_connect_to_bitmessage] Connecting to Bitmessage on port %s',
                bm_port)
            self.bitmessage_api = xmlrpclib.ServerProxy(
                "http://{}:{}@localhost:{}/".format(bm_user, bm_pass, bm_port),
                verbose=0)
            # add(2, 3) is used as a cheap liveness probe of the API.
            result = self.bitmessage_api.add(2, 3)
            self.log.info(
                "[_connect_to_bitmessage] Bitmessage API is live: %s", result)
        # If we failed, fall back to starting our own
        except Exception as e:
            self.log.info("Failed to connect to bitmessage instance: %s", e)
            self.bitmessage_api = None
        return result

    def validate_on_hello(self, msg):
        """Accept every hello (ping) message."""
        self.log.debug('Validating ping message.')
        return True

    def on_hello(self, msg):
        """Log an incoming hello/ping message."""
        self.log.info('Pinged %s', json.dumps(msg, ensure_ascii=False))

    def validate_on_store(self, msg):
        """Accept every store-value message."""
        self.log.debug('Validating store value message.')
        return True

    def on_store(self, msg):
        """Delegate a store request to the DHT."""
        self.dht._on_storeValue(msg)

    def validate_on_findNode(self, msg):
        """Accept every findNode message."""
        self.log.debug('Validating find node message.')
        return True

    def on_findNode(self, msg):
        """Delegate a findNode request to the DHT."""
        self.dht.on_find_node(msg)

    def validate_on_findNodeResponse(self, msg):
        """Accept every findNode response."""
        self.log.debug('Validating find node response message.')
        return True

    def on_findNodeResponse(self, msg):
        """Delegate a findNode response to the DHT."""
        self.dht.on_findNodeResponse(self, msg)

    def _setup_settings(self):
        """Load node settings from the DB, creating defaults and key
        material (PGP, Bitcoin, Bitmessage) on first run."""
        try:
            self.settings = self.db.selectEntries(
                "settings", {"market_id": self.market_id})
        except (OperationalError, DatabaseError) as e:
            print e
            raise SystemExit(
                "database file %s corrupt or empty - cannot continue" %
                self.db.db_path)

        if len(self.settings) == 0:
            # First run: seed minimal default settings for this market.
            self.settings = {"market_id": self.market_id, "welcome": "enable"}
            self.db.insertEntry("settings", self.settings)
        else:
            self.settings = self.settings[0]

        # Generate PGP key during initial setup or if previous PGP gen failed
        if not self.settings.get('PGPPubKey'):
            try:
                self.log.info(
                    'Generating PGP keypair. This may take several minutes...')
                print 'Generating PGP keypair. This may take several minutes...'
                gpg = gnupg.GPG()
                input_data = gpg.gen_key_input(
                    key_type="RSA",
                    key_length=2048,
                    name_email='*****@*****.**',
                    name_comment="Autogenerated by Open Bazaar",
                    passphrase="P@ssw0rd")
                assert input_data is not None
                key = gpg.gen_key(input_data)
                assert key is not None

                pubkey_text = gpg.export_keys(key.fingerprint)
                newsettings = {
                    "PGPPubKey": pubkey_text,
                    "PGPPubkeyFingerprint": key.fingerprint
                }
                self.db.updateEntries("settings", newsettings,
                                      {"market_id": self.market_id})
                self.settings.update(newsettings)

                self.log.info('PGP keypair generated.')
            except Exception as e:
                sys.exit("Encountered a problem with GPG: %s" % e)

        if not self.settings.get('pubkey'):
            # Generate Bitcoin keypair
            self._generate_new_keypair()

        if not self.settings.get('nickname'):
            newsettings = {'nickname': 'Default'}
            self.db.updateEntries('settings', newsettings,
                                  {"market_id": self.market_id})
            self.settings.update(newsettings)

        # Mirror the persisted settings onto the instance.
        self.nickname = self.settings.get('nickname', '')
        self.secret = self.settings.get('secret', '')
        self.pubkey = self.settings.get('pubkey', '')
        self.privkey = self.settings.get('privkey')
        self.btc_pubkey = privkey_to_pubkey(self.privkey)
        self.guid = self.settings.get('guid', '')
        self.sin = self.settings.get('sin', '')
        self.bitmessage = self.settings.get('bitmessage', '')

        if not self.settings.get('bitmessage'):
            # Generate Bitmessage address
            if self.bitmessage_api is not None:
                self._generate_new_bitmessage_address()

        self.cryptor = Cryptor(pubkey_hex=self.pubkey, privkey_hex=self.secret)

        # In case user wants to override with command line passed bitmessage values
        if self.ob_ctx.bm_user is not None and \
           self.ob_ctx.bm_pass is not None and \
           self.ob_ctx.bm_port is not None:
            self._connect_to_bitmessage()

    def _generate_new_keypair(self):
        """Create secret/pubkey/privkey, GUID and SIN, and persist them."""
        secret = str(random.randrange(2**256))
        # NOTE(review): `random` is not a CSPRNG; key material generated
        # here is predictable -- confirm whether this is intended.
        self.secret = hashlib.sha256(secret).hexdigest()
        self.pubkey = privkey_to_pubkey(self.secret)
        self.privkey = random_key()
        self.btc_pubkey = privkey_to_pubkey(self.privkey)
        print 'PUBLIC KEY: ', self.btc_pubkey

        # Generate SIN
        sha_hash = hashlib.sha256()
        sha_hash.update(self.pubkey)
        ripe_hash = hashlib.new('ripemd160')
        ripe_hash.update(sha_hash.digest())

        # GUID is RIPEMD160(SHA256(pubkey)); SIN is its Base58Check form.
        self.guid = ripe_hash.hexdigest()
        self.sin = obelisk.EncodeBase58Check('\x0F\x02%s' % ripe_hash.digest())

        newsettings = {
            "secret": self.secret,
            "pubkey": self.pubkey,
            "privkey": self.privkey,
            "guid": self.guid,
            "sin": self.sin
        }
        self.db.updateEntries("settings", newsettings,
                              {"market_id": self.market_id})
        self.settings.update(newsettings)

    def _generate_new_bitmessage_address(self):
        """Create a Bitmessage address via the API and persist it."""
        # Use the guid generated previously as the key
        self.bitmessage = self.bitmessage_api.createRandomAddress(
            self.guid.encode('base64'), False, 1.05, 1.1111)
        newsettings = {"bitmessage": self.bitmessage}
        self.db.updateEntries("settings", newsettings,
                              {"market_id": self.market_id})
        self.settings.update(newsettings)

    def join_network(self, seeds=None, callback=None):
        """Connect to seed servers and persisted peers, then locate self.

        :param seeds: optional list of seed host names/IPs.
        :param callback: invoked with 'Joined' once the join was kicked off.
        """
        if seeds is None:
            seeds = []

        self.log.info('Joining network')

        # Connect up through seed servers
        for idx, seed in enumerate(seeds):
            seeds[idx] = network_util.get_peer_url(seed, "12345")

        # Connect to persisted peers
        db_peers = self.get_past_peers()

        known_peers = list(set(seeds).union(db_peers))

        for known_peer in known_peers:
            self.dht.add_peer(self, known_peer)

        # Populate routing table by searching for self
        if known_peers:
            # Check every one second if we are connected
            # We could use a PeriodicCallback but I think this is simpler
            # since this will be repeated in most cases less than 10 times
            def join_callback():
                # If we are not connected to any node, reschedule a check
                if not self.dht.activePeers:
                    ioloop.IOLoop.instance().call_later(1, join_callback)
                else:
                    self.search_for_my_node()

            join_callback()

        if callback is not None:
            callback('Joined')

    def get_past_peers(self):
        """Return the URIs of peers persisted for this market."""
        result = self.db.selectEntries("peers", {"market_id": self.market_id})
        return [peer['uri'] for peer in result]

    def search_for_my_node(self):
        """Populate the routing table by searching the DHT for our GUID."""
        self.log.info('Searching for myself')
        self.dht._iterativeFind(self.guid, self.dht.knownNodes, 'findNode')

    def get_crypto_peer(self, guid=None, uri=None, pubkey=None, nickname=None):
        """Build a CryptoPeerConnection; refuses to connect to ourselves."""
        if guid == self.guid:
            self.log.error('Cannot get CryptoPeerConnection for your own node')
            return

        self.log.debug(
            'Getting CryptoPeerConnection'
            '\nGUID: %s'
            '\nURI: %s'
            '\nPubkey:%s'
            '\nNickname:%s', guid, uri, pubkey, nickname)

        return connection.CryptoPeerConnection(self,
                                               uri,
                                               pubkey,
                                               guid=guid,
                                               nickname=nickname)

    def respond_pubkey_if_mine(self, nickname, ident_pubkey):
        """Sign and broadcast our pubkey when asked for it by pubkey."""
        if ident_pubkey != self.pubkey:
            self.log.info("Public key does not match your identity")
            return

        # Return signed pubkey
        pubkey = self.cryptor.pubkey  # XXX: A Cryptor does not have such a field.
        ec_key = obelisk.EllipticCurveKey()
        ec_key.set_secret(self.secret)
        digest = obelisk.Hash(pubkey)
        signature = ec_key.sign(digest)

        # Send array of nickname, pubkey, signature to transport layer
        self.send(proto_response_pubkey(nickname, pubkey, signature))

    def send(self, data, send_to=None, callback=None):
        """Send `data` to one peer (by GUID) or to all active peers.

        :param data: message dict to deliver.
        :param send_to: target peer GUID; broadcast when None.
        :param callback: invoked with the peer's reply (directed send).
        """
        self.log.debug("Outgoing Data: %s %s", data, send_to)

        # Directed message
        if send_to is not None:

            # Prefer the routing table; fall back to the active-peer list.
            peer = self.dht.routingTable.getContact(send_to)
            if peer is None:
                for activePeer in self.dht.activePeers:
                    if activePeer.guid == send_to:
                        peer = activePeer
                        break

            if peer:
                self.log.debug('Directed Data (%s): %s', send_to, data)
                try:
                    peer.send(data, callback=callback)
                except Exception as e:
                    self.log.error('Not sending message directly to peer %s',
                                   e)
            else:
                self.log.error('No peer found')

        else:
            # FindKey and then send

            for peer in self.dht.activePeers:
                try:
                    routing_peer = self.dht.routingTable.getContact(peer.guid)

                    if routing_peer is None:
                        self.dht.routingTable.addContact(peer)
                        routing_peer = peer

                    # Stamp the message with our identity before sending.
                    data['senderGUID'] = self.guid
                    data['pubkey'] = self.pubkey

                    def cb(msg):
                        self.log.debug('Message Back: \n%s', pformat(msg))

                    routing_peer.send(data, cb)

                except Exception:
                    self.log.info("Error sending over peer!")
                    traceback.print_exc()

    def _on_message(self, msg):
        """Record the sender as a peer and dispatch `msg` to callbacks.

        NOTE(review): `msg.get('senderNick')[:120]` raises TypeError
        when 'senderNick' is absent -- confirm senders always set it.
        """
        # here goes the application callbacks
        # we get a "clean" msg which is a dict holding whatever

        pubkey = msg.get('pubkey')
        uri = msg.get('uri')
        guid = msg.get('senderGUID')
        nickname = msg.get('senderNick')[:120]

        self.log.info('On Message: %s', json.dumps(msg, ensure_ascii=False))
        self.dht.add_peer(self, uri, pubkey, guid, nickname)
        # Callbacks run on a separate thread so the IO loop is not blocked.
        t = Thread(target=self.trigger_callbacks, args=(
            msg['type'],
            msg,
        ))
        t.start()

    def store(self, *args, **kwargs):
        """
        Store or republish data.

        Refer to the dht module (iterativeStore()) for further details.
        """
        self.dht.iterativeStore(*args, **kwargs)

    def shutdown(self):
        """Best-effort shutdown; closes the Bitmessage API proxy if open."""
        print "CryptoTransportLayer.shutdown()!"
        print "Notice: explicit DHT Shutdown not implemented."

        try:
            if self.bitmessage_api is not None:
                self.bitmessage_api.close()
        except Exception as e:
            # It might not even be open; we can't do much more on our
            # way out if exception is thrown here.
            self.log.error(
                "Could not shutdown bitmessage_api's ServerProxy: %s",
                e.message)
Esempio n. 36
0
class MDPBroker(object):
    """The MDP broker class.

    The broker routes messages from clients to appropriate workers based on the
    requested service.

    This base class defines the overall functionality and the API. Subclasses are
    meant to implement additional features (like logging).

    The broker uses ZMQ ROUTER sockets to deal with clients and workers. These sockets
    are wrapped in pyzmq streams to fit well into IOLoop.

    .. note::

      The workers will *always* be served by the `main_ep` endpoint.

      In a two-endpoint setup clients will be handled via the `opt_ep`
      endpoint.

    :param context:    the context to use for socket creation.
    :type context:     zmq.Context
    :param main_ep:    the primary endpoint for workers and clients.
    :type main_ep:     str
    :param opt_ep:     is an optional 2nd endpoint (clients only).
    :type opt_ep:      str
    """

    CLIENT_PROTO = b'MDPC01'  #: Client protocol identifier
    WORKER_PROTO = b'MDPW01'  #: Worker protocol identifier

    def __init__(self, context, main_ep, opt_ep=None):
        """Init MDPBroker instance.
        """
        l = logger.Logger('mq_broker')
        self.log = l.get_logger()
        self.log.info("MDP broker startup...")

        socket = ZmqSocket(context, zmq.ROUTER)
        socket.bind(main_ep)
        self.main_stream = ZMQStream(socket)
        self.main_stream.on_recv(self.on_message)
        if opt_ep:
            socket = ZmqSocket(context, zmq.ROUTER)
            socket.bind(opt_ep)
            self.client_stream = ZMQStream(socket)
            self.client_stream.on_recv(self.on_message)
        else:
            # Single-endpoint setup: clients and workers share one socket.
            self.client_stream = self.main_stream
        self.log.debug("Socket created...")
        self._workers = {}
        # services contain the worker queue and the request queue
        self._services = {}
        # MDP worker command byte -> handler method.
        self._worker_cmds = {
            b'\x01': self.on_ready,
            b'\x03': self.on_reply,
            b'\x04': self.on_heartbeat,
            b'\x05': self.on_disconnect,
        }
        self.log.debug("Launch the timer...")
        self.hb_check_timer = PeriodicCallback(self.on_timer, HB_INTERVAL)
        self.hb_check_timer.start()
        self.log.info("MDP broker started")
        return

    def register_worker(self, wid, service):
        """Register the worker id and add it to the given service.

        Does nothing if worker is already known.

        :param wid:    the worker id.
        :type wid:     str
        :param service:    the service name.
        :type service:     str

        :rtype: None
        """
        self.log.debug(
            "Try to register a worker : wid={0}, service={1}".format(
                wid, service))
        try:
            if wid in self._workers:
                # FIX: the message previously interpolated the service
                # name where the worker id was meant.
                self.log.debug("Worker wid={0} already registered".format(wid))
                return
            self._workers[wid] = WorkerRep(self.WORKER_PROTO, wid, service,
                                           self.main_stream)
            if service in self._services:
                wq, wr = self._services[service]
                wq.put(wid)
            else:
                q = ServiceQueue()
                q.put(wid)
                self._services[service] = (q, [])
            self.log.info("Registered worker : wid={0}, service={1}".format(
                wid, service))
        except Exception:
            self.log.error(
                "Error while registering a worker : wid={0}, service={1}, trace={2}"
                .format(wid, service, traceback.format_exc()))
        return

    def unregister_worker(self, wid):
        """Unregister the worker with the given id.

        If the worker id is not registered, nothing happens.

        Will stop all timers for the worker.

        :param wid:    the worker id.
        :type wid:     str

        :rtype: None
        """
        self.log.debug("Try to unregister a worker : wid={0}".format(wid))
        try:
            try:
                wrep = self._workers[wid]
            except KeyError:
                # not registered, ignore
                self.log.warning(
                    "The worker wid={0} is not registered, ignoring the unregister request"
                    .format(wid))
                return
            wrep.shutdown()
            service = wrep.service
            if service in self._services:
                wq, wr = self._services[service]
                wq.remove(wid)
            del self._workers[wid]
            self.log.info("Unregistered worker : wid={0}".format(wid))
        except Exception:
            self.log.error(
                "Error while unregistering a worker : wid={0}, trace={1}".
                format(wid, traceback.format_exc()))
        return

    def disconnect(self, wid):
        """Send disconnect command and unregister worker.

        If the worker id is not registered, nothing happens.

        :param wid:    the worker id.
        :type wid:     str

        :rtype: None
        """
        self.log.debug("Try to disconnect a worker : wid={0}".format(wid))
        try:
            try:
                wrep = self._workers[wid]
            except KeyError:
                # not registered, ignore
                # FIX: the original message referenced `wrep.service`,
                # but `wrep` is unbound when the lookup fails, which
                # raised NameError and skipped the early return.
                self.log.warning(
                    "The worker wid={0} is not registered, ignoring the disconnect request"
                    .format(wid))
                return
            # FIX: include the empty delimiter frame, matching the
            # format used by WorkerRep.send_hb and on_client (MDP
            # broker->worker messages are [address, '', proto, cmd]).
            to_send = [wid, b'', self.WORKER_PROTO, b'\x05']
            self.main_stream.send_multipart(to_send)
            self.log.info(
                "Request to unregister a worker : wid={0} service={1}".format(
                    wid, wrep.service))
        except Exception:
            self.log.error(
                "Error while disconnecting a worker : wid={0}, trace={1}".
                format(wid, traceback.format_exc()))
        self.unregister_worker(wid)
        return

    def client_response(self, rp, service, msg):
        """Package and send reply to client.

        :param rp:       return address stack
        :type rp:        list of str
        :param service:  name of service
        :type service:   str
        :param msg:      message parts
        :type msg:       list of str

        :rtype: None
        """
        # Frames: return path, empty delimiter, client proto, service, body.
        to_send = rp[:]
        to_send.extend([b'', self.CLIENT_PROTO, service])
        to_send.extend(msg)
        self.client_stream.send_multipart(to_send)
        return

    def shutdown(self):
        """Shutdown broker.

        Will unregister all workers, stop all timers and ignore all further
        messages.

        .. warning:: The instance MUST not be used after :func:`shutdown` has been called.

        :rtype: None
        """
        self.log.info("Shutdown starting...")
        try:
            self.log.debug("Closing the socket...")
            # When both streams share one socket, drop the alias first so
            # the socket below is only closed once.
            if self.client_stream == self.main_stream:
                self.client_stream = None
            self.main_stream.on_recv(None)
            self.main_stream.socket.setsockopt(zmq.LINGER, 0)
            self.main_stream.socket.close()
            self.main_stream.close()
            self.main_stream = None
            if self.client_stream:
                self.client_stream.on_recv(None)
                self.client_stream.socket.setsockopt(zmq.LINGER, 0)
                self.client_stream.socket.close()
                self.client_stream.close()
                self.client_stream = None
            self.log.debug("Clean workers and services...")
            self._workers = {}
            self._services = {}
        except Exception:
            self.log.error("Error during shutdown : trace={0}".format(
                traceback.format_exc()))
        return

    def on_timer(self):
        """Method called on timer expiry.

        Checks which workers are dead and unregisters them.

        :rtype: None
        """
        self.log.debug("Check for dead workers...")
        for wrep in list(self._workers.values()):
            if not wrep.is_alive():
                self.log.info(
                    "A worker seems to be dead : wid={0} service={1}".format(
                        wrep.id, wrep.service))
                self.unregister_worker(wrep.id)
        return

    def on_ready(self, rp, msg):
        """Process worker READY command.

        Registers the worker for a service.

        :param rp:  return address stack
        :type rp:   list of str
        :param msg: message parts
        :type msg:  list of str

        :rtype: None
        """
        ret_id = rp[0]
        self.register_worker(ret_id, msg[0])
        return

    def on_reply(self, rp, msg):
        """Process worker REPLY command.

        Route the `msg` to the client given by the address(es) in front of `msg`.

        :param rp:  return address stack
        :type rp:   list of str
        :param msg: message parts
        :type msg:  list of str

        :rtype: None
        """
        ret_id = rp[0]
        # make worker available again
        try:
            wrep = self._workers[ret_id]
            service = wrep.service
            wq, wr = self._services[service]
            cp, msg = split_address(msg)
            self.client_response(cp, service, msg)
            wq.put(wrep.id)
            # Serve a queued request immediately if one is waiting.
            if wr:
                proto, rp, msg = wr.pop(0)
                self.on_client(proto, rp, msg)
        except KeyError:
            # unknown service
            self.disconnect(ret_id)
        return

    def on_heartbeat(self, rp, msg):
        """Process worker HEARTBEAT command.

        :param rp:  return address stack
        :type rp:   list of str
        :param msg: message parts
        :type msg:  list of str

        :rtype: None
        """
        ret_id = rp[0]
        try:
            worker = self._workers[ret_id]
            if worker.is_alive():
                worker.on_heartbeat()
        except KeyError:
            # ignore HB for unknown worker
            pass
        return

    def on_disconnect(self, rp, msg):
        """Process worker DISCONNECT command.

        Unregisters the worker who sent this message.

        :param rp:  return address stack
        :type rp:   list of str
        :param msg: message parts
        :type msg:  list of str

        :rtype: None
        """
        wid = rp[0]
        self.log.info("A worker disconnects itself : wid={0}".format(wid))
        self.unregister_worker(wid)
        return

    def on_mmi(self, rp, service, msg):
        """Process MMI request.

        For now only mmi.service is handled.

        :param rp:      return address stack
        :type rp:       list of str
        :param service: the protocol id sent
        :type service:  str
        :param msg:     message parts
        :type msg:      list of str

        :rtype: None
        """
        if service == b'mmi.service':
            # 200 when some worker serves the requested service, else 404.
            s = msg[0]
            ret = b'404'
            for wr in list(self._workers.values()):
                if s == wr.service:
                    ret = b'200'
                    break
            self.client_response(rp, service, [ret])
        elif service == b'mmi.services':
            ret = []
            for wr in list(self._workers.values()):
                ret.append(wr.service)
            self.client_response(rp, service, [b', '.join(ret)])
        else:
            # Unsupported MMI sub-request.
            self.client_response(rp, service, [b'501'])
        return

    def on_client(self, proto, rp, msg):
        """Method called on client message.

        Frame 0 of msg is the requested service.
        The remaining frames are the request to forward to the worker.

        .. note::

           If the service is unknown to the broker the message is
           ignored.

        .. note::

           If currently no worker is available for a known service,
           the message is queued for later delivery.

        If a worker is available for the requested service, the
        message is repackaged and sent to the worker. The worker in
        question is removed from the pool of available workers.

        If the service name starts with `mmi.`, the message is passed to
        the internal MMI_ handler.

        .. _MMI: http://rfc.zeromq.org/spec:8

        :param proto: the protocol id sent
        :type proto:  str
        :param rp:    return address stack
        :type rp:     list of str
        :param msg:   message parts
        :type msg:    list of str

        :rtype: None
        """
        service = msg.pop(0)
        if service.startswith(b'mmi.'):
            self.on_mmi(rp, service, msg)
            return
        try:
            wq, wr = self._services[service]
            wid = wq.get()
            if not wid:
                # no worker ready
                # queue message
                msg.insert(0, service)
                wr.append((proto, rp, msg))
                return
            wrep = self._workers[wid]
            to_send = [wrep.id, b'', self.WORKER_PROTO, b'\x02']
            to_send.extend(rp)
            to_send.append(b'')
            to_send.extend(msg)
            self.main_stream.send_multipart(to_send)
        except KeyError:
            # unknown service
            # ignore request
            msg = "broker has no service {0}".format(service)
            print(msg)
            self.log.warning(msg)
        return

    def on_worker(self, proto, rp, msg):
        """Method called on worker message.

        Frame 0 of msg is the command id.
        The remaining frames depend on the command.

        This method determines the command sent by the worker and
        calls the appropriate method. If the command is unknown the
        message is ignored and a DISCONNECT is sent.

        :param proto: the protocol id sent
        :type proto:  str
        :param rp:  return address stack
        :type rp:   list of str
        :param msg: message parts
        :type msg:  list of str

        :rtype: None
        """
        cmd = msg.pop(0)
        if cmd in self._worker_cmds:
            fnc = self._worker_cmds[cmd]
            fnc(rp, msg)
        else:
            # ignore unknown command
            # DISCONNECT worker
            self.log.warning(
                "Unknown command from worker (it will be disconnect) : wid={0}, cmd={1}"
                .format(rp[0], cmd))
            self.disconnect(rp[0])
        return

    def on_message(self, msg):
        """Processes given message.

        Decides what kind of message it is -- client or worker -- and
        calls the appropriate method. If unknown, the message is
        ignored.

        :param msg: message parts
        :type msg:  list of str

        :rtype: None
        """
        self.log.debug("Message received: {0}".format(msg))
        rp, msg = split_address(msg)
        # dispatch on first frame after path
        t = msg.pop(0)
        if t.startswith(b'MDPW'):
            self.on_worker(t, rp, msg)
        elif t.startswith(b'MDPC'):
            self.on_client(t, rp, msg)
        else:
            self.log.warning("Broker unknown Protocol: {0}".format(t))
        return
Esempio n. 37
0
class MDPBroker(object):

    """The MDP broker class.

    The broker routes messages from clients to appropriate workers based on the
    requested service.

    This base class defines the overall functionality and the API. Subclasses are
    meant to implement additional features (like logging).

    The broker uses ZMQ ROUTER sockets to deal with clients and workers. These sockets
    are wrapped in pyzmq streams to fit well into IOLoop.

    .. note::

      The workers will *always* be served by the `main_ep` endpoint.

      In a two-endpoint setup clients will be handled via the `opt_ep`
      endpoint.

    :param context:    the context to use for socket creation.
    :type context:     zmq.Context
    :param main_ep:    the primary endpoint for workers.
    :type main_ep:     str
    :param client_ep:  the clients endpoint
    :type client_ep:   str
    :param hb_ep:      the heart beat endpoint for workers.
    :type hb_ep:       str
    :param service_q:  the class to be used for the service worker-queue.
    :type service_q:   class
    """

    CLIENT_PROTO = C_CLIENT  #: Client protocol identifier
    WORKER_PROTO = W_WORKER  #: Worker protocol identifier


    def __init__(self, context, main_ep, client_ep, hb_ep, service_q=None):
        """Init MDPBroker instance.
        """

        if service_q is None:
            self.service_q = ServiceQueue
        else:
            self.service_q = service_q

        #
        # Setup the zmq sockets.
        #
        socket = context.socket(zmq.ROUTER)
        socket.bind(main_ep)
        self.main_stream = ZMQStream(socket)
        self.main_stream.on_recv(self.on_message)

        socket = context.socket(zmq.ROUTER)
        socket.bind(client_ep)
        self.client_stream = ZMQStream(socket)
        self.client_stream.on_recv(self.on_message)

        socket = context.socket(zmq.ROUTER)
        socket.bind(hb_ep)
        self.hb_stream = ZMQStream(socket)
        self.hb_stream.on_recv(self.on_message)

        # Maps worker id -> WorkerRep.
        self._workers = {}

        #
        # services contain the service queue and the request queue
        # (mapping: service name -> (worker-queue, list of queued requests)).
        #
        self._services = {}

        #
        # Mapping of worker commands and callbacks.
        #
        self._worker_cmds = {
            W_READY: self.on_ready,
            W_REPLY: self.on_reply,
            W_HEARTBEAT: self.on_heartbeat,
            W_DISCONNECT: self.on_disconnect,
        }

        #
        # 'Cleanup' timer for workers without heartbeat.
        #
        self.hb_check_timer = PeriodicCallback(self.on_timer, HB_INTERVAL)
        self.hb_check_timer.start()

    def register_worker(self, wid, service):
        """Register the worker id and add it to the given service.

        Does nothing if worker is already known.

        :param wid:    the worker id.
        :type wid:     str
        :param service:    the service name.
        :type service:     str

        :rtype: None
        """

        if wid in self._workers:
            logging.info('Worker {} already registered'.format(service))
            return

        logging.info('Registering new worker {}'.format(service))

        self._workers[wid] = WorkerRep(self.WORKER_PROTO, wid, service, self.main_stream)

        if service in self._services:
            wq, wr = self._services[service]
            wq.put(wid)
        else:
            q = self.service_q()
            q.put(wid)
            self._services[service] = (q, [])

    def unregister_worker(self, wid):
        """Unregister the worker with the given id.

        If the worker id is not registered, nothing happens.

        Will stop all timers for the worker.

        :param wid:    the worker id.
        :type wid:     str

        :rtype: None
        """

        try:
            wrep = self._workers[wid]
        except KeyError:
            #
            # Not registered, ignore
            #
            return

        logging.info('Unregistering worker {}'.format(wrep.service))

        wrep.shutdown()

        service = wrep.service
        if service in self._services:
            wq, wr = self._services[service]
            wq.remove(wid)

        del self._workers[wid]

    def disconnect(self, wid):
        """Send disconnect command and unregister worker.

        If the worker id is not registered, nothing happens.

        :param wid:    the worker id.
        :type wid:     str

        :rtype: None
        """

        try:
            wrep = self._workers[wid]
        except KeyError:
            #
            # Not registered, ignore
            #
            return

        logging.info('Disconnecting worker {}'.format(wrep.service))

        # NOTE(review): other worker-bound sends (e.g. WorkerRep.send_hb,
        # on_client) insert EMPTY_FRAME after the id; this one does not.
        # Verify against the worker-side framing before changing it.
        to_send = [wid, self.WORKER_PROTO, W_DISCONNECT]
        self.main_stream.send_multipart(to_send)

        self.unregister_worker(wid)

    def client_response(self, rp, service, msg):
        """Package and send reply to client.

        :param rp:       return address stack
        :type rp:        list of str
        :param service:  name of service
        :type service:   str
        :param msg:      message parts
        :type msg:       list of str

        :rtype: None
        """

        # MMI replies are chatty; log them at debug level only.
        if service == MMI_SERVICE:
            logging.debug('Send reply to client from worker {}'.format(service))
        else:
            logging.info('Send reply to client from worker {}'.format(service))

        to_send = rp[:]
        to_send.extend([EMPTY_FRAME, self.CLIENT_PROTO, service])
        to_send.extend(msg)
        self.client_stream.send_multipart(to_send)

    def shutdown(self):
        """Shutdown broker.

        Will unregister all workers, stop all timers and ignore all further
        messages.

        .. warning:: The instance MUST not be used after :func:`shutdown` has been called.

        :rtype: None
        """

        logging.debug('Shutting down')

        self.main_stream.on_recv(None)
        self.main_stream.socket.setsockopt(zmq.LINGER, 0)
        self.main_stream.socket.close()
        self.main_stream.close()
        self.main_stream = None

        self.client_stream.on_recv(None)
        self.client_stream.socket.setsockopt(zmq.LINGER, 0)
        self.client_stream.socket.close()
        self.client_stream.close()
        self.client_stream = None

        self.hb_stream.on_recv(None)
        self.hb_stream.socket.setsockopt(zmq.LINGER, 0)
        self.hb_stream.socket.close()
        self.hb_stream.close()
        self.hb_stream = None

        self._workers = {}
        self._services = {}

    def on_timer(self):
        """Method called on timer expiry.

        Checks which workers are dead and unregisters them.

        :rtype: None
        """

        #
        #  Remove 'dead' (not responding to heartbeats) workers.
        #
        # Iterate over a snapshot: unregister_worker() deletes from
        # self._workers, and mutating the dict while iterating it raises
        # RuntimeError in Python 3.
        #
        for wrep in list(self._workers.values()):
            if not wrep.is_alive():
                self.unregister_worker(wrep.id)

    def on_ready(self, rp, msg):
        """Process worker READY command.

        Registers the worker for a service.

        :param rp:  return address stack
        :type rp:   list of str
        :param msg: message parts
        :type msg:  list of str

        :rtype: None
        """

        ret_id = rp[0]
        logging.debug('Worker sent ready msg: {} ,{}'.format(rp, msg))
        self.register_worker(ret_id, msg[0])

    def on_reply(self, rp, msg):
        """Process worker REPLY command.

        Route the `msg` to the client given by the address(es) in front of `msg`.

        :param rp:  return address stack
        :type rp:   list of str
        :param msg: message parts
        :type msg:  list of str

        :rtype: None
        """

        ret_id = rp[0]
        wrep = self._workers.get(ret_id)

        if not wrep:
            #
            # worker not found, ignore message
            #
            logging.error(
                "Worker with return id {} not found. Ignore message.".format(
                    ret_id))
            return

        service = wrep.service
        logging.info("Worker {} sent reply.".format(service))

        try:
            wq, wr = self._services[service]

            #
            # Send response to client
            #
            cp, msg = split_address(msg)
            self.client_response(cp, service, msg)

            #
            # make worker available again
            #
            wq.put(wrep.id)

            if wr:
                # A request was queued while no worker was free; replay it
                # through the normal client path now that one is available.
                logging.info("Sending queued message to worker {}".format(service))
                proto, rp, msg = wr.pop(0)
                self.on_client(proto, rp, msg)
        except KeyError:
            #
            # unknown service
            #
            self.disconnect(ret_id)

    def on_heartbeat(self, rp, msg):
        """Process worker HEARTBEAT command.

        :param rp:  return address stack
        :type rp:   list of str
        :param msg: message parts
        :type msg:  list of str

        :rtype: None
        """

        #
        # Note:
        # The modified heartbeat of the worker is sent over a separate socket
        # stream (self.hb_stream). Therefore the ret_id is wrong. Instead the
        # worker sends its id in the message.
        #
        if len(msg) > 0:
            ret_id = msg[0]
        else:
            ret_id = rp[0]

        try:
            worker = self._workers[ret_id]
            if worker.is_alive():
                worker.on_heartbeat()
        except KeyError:
            #
            # Ignore HB for unknown worker
            #
            pass

    def on_disconnect(self, rp, msg):
        """Process worker DISCONNECT command.

        Unregisters the worker who sent this message.

        :param rp:  return address stack
        :type rp:   list of str
        :param msg: message parts
        :type msg:  list of str

        :rtype: None
        """

        wid = rp[0]
        self.unregister_worker(wid)

    def on_mmi(self, rp, service, msg):
        """Process MMI request.

        mmi.service is used for querying if a specific service is available.
        mmi.services is used for querying the list of services available.

        :param rp:      return address stack
        :type rp:       list of str
        :param service: the protocol id sent
        :type service:  str
        :param msg:     message parts
        :type msg:      list of str

        :rtype: None
        """

        if service == MMI_SERVICE:
            s = msg[0]
            ret = [UNKNOWN_SERVICE]

            for wr in self._workers.values():
                if s == wr.service:
                    ret = [KNOWN_SERVICE]
                    break

        elif service == MMI_SERVICES:
            #
            # Return list of services
            #
            ret = [wr.service for wr in self._workers.values()]

        elif service == MMI_TUNNELS:
            #
            # Read the tunnel files, and send back the network info.
            #
            tunnel_paths = glob.glob(os.path.expanduser("~/tunnel_port_*.txt"))
            tunnels_data = {}
            for path in tunnel_paths:
                filename = os.path.split(path)[-1]
                # NOTE(review): assumes a fixed 3-char service name right
                # before the ".txt" suffix -- confirm the file-naming scheme.
                service_name = filename[-7:-4]
                with open(path, 'r') as f:
                    tunnels_data[service_name] = json.load(f)
            ret = [cPickle.dumps(tunnels_data)]
        else:
            #
            # Unknown command.
            #
            ret = [UNKNOWN_COMMAND]

        self.client_response(rp, service, ret)

    def on_client(self, proto, rp, msg):
        """Method called on client message.

        Frame 0 of msg is the requested service.
        The remaining frames are the request to forward to the worker.

        .. note::

           If the service is unknown to the broker the message is
           ignored.

        .. note::

           If currently no worker is available for a known service,
           the message is queued for later delivery.

        If a worker is available for the requested service, the
        message is repackaged and sent to the worker. The worker in
        question is removed from the pool of available workers.

        If the service name starts with `mmi.`, the message is passed to
        the internal MMI_ handler.

        .. _MMI: http://rfc.zeromq.org/spec:8

        :param proto: the protocol id sent
        :type proto:  str
        :param rp:    return address stack
        :type rp:     list of str
        :param msg:   message parts
        :type msg:    list of str

        :rtype: None
        """

        service = msg.pop(0)

        if service.startswith(b'mmi.'):
            logging.debug("Got MMI message from client.")
            self.on_mmi(rp, service, msg)
            return

        logging.info("Client sends message (possibly queued) to worker {}".format(service))

        try:
            wq, wr = self._services[service]
            wid = wq.get()

            if not wid:
                #
                # No worker ready. Queue message
                # (service is re-inserted so on_reply can replay it through
                # this method unchanged).
                #
                logging.info("Worker {} missing. Queuing message.".format(service))
                msg.insert(0, service)
                wr.append((proto, rp, msg))
                return

            wrep = self._workers[wid]
            to_send = [wrep.id, EMPTY_FRAME, self.WORKER_PROTO, W_REQUEST]
            to_send.extend(rp)
            to_send.append(EMPTY_FRAME)
            to_send.extend(msg)
            self.main_stream.send_multipart(to_send)

        except KeyError:
            #
            # Unknown service. Ignore request
            #
            logging.info('broker has no service "{}"'.format(service))

    def on_worker(self, proto, rp, msg):
        """Method called on worker message.

        Frame 0 of msg is the command id.
        The remaining frames depend on the command.

        This method determines the command sent by the worker and
        calls the appropriate method. If the command is unknown the
        message is ignored and a DISCONNECT is sent.

        :param proto: the protocol id sent
        :type proto:  str
        :param rp:  return address stack
        :type rp:   list of str
        :param msg: message parts
        :type msg:  list of str

        :rtype: None
        """

        cmd = msg.pop(0)
        if cmd in self._worker_cmds:
            fnc = self._worker_cmds[cmd]
            fnc(rp, msg)
        else:
            #
            # Ignore unknown command. Disconnect worker.
            #
            logging.error("Unknown worker command: {}".format(cmd))
            self.disconnect(rp[0])

    def on_message(self, msg):
        """Processes given message.

        Decides what kind of message it is -- client or worker -- and
        calls the appropriate method. If unknown, the message is
        ignored.

        :param msg: message parts
        :type msg:  list of str

        :rtype: None
        """

        rp, msg = split_address(msg)

        try:
            #
            # Dispatch on first frame after path
            #
            t = msg.pop(0)
            if t.startswith(b'MDPW'):
                logging.debug('Recieved message from worker {}'.format(rp))
                self.on_worker(t, rp, msg)
            elif t.startswith(b'MDPC'):
                logging.debug('Recieved message from client {}'.format(rp))
                self.on_client(t, rp, msg)
            else:
                logging.error('Broker unknown Protocol: "{}"'.format(t))
        except Exception:
            # Narrowed from a bare `except:` so that SystemExit /
            # KeyboardInterrupt are not swallowed by the message loop.
            logging.error(
                "An error occured while trying to process message: rp: {}, msg: {}\n{}".format(
                    rp, msg, traceback.format_exc()
                )
            )
Esempio n. 38
0
class WorkerRep(object):

    """Helper class to represent a worker in the broker.

    Instances of this class are used to track the state of the attached worker
    and carry the timers for incoming and outgoing heartbeats.

    :param proto:    the worker protocol id.
    :type proto:     str
    :param wid:      the worker id.
    :type wid:       str
    :param service:  service this worker serves
    :type service:   str
    :param stream:   the ZMQStream used to send messages
    :type stream:    ZMQStream
    """

    def __init__(self, proto, wid, service, stream):
        self.proto = proto
        self.id = wid
        self.service = service
        # Remaining heartbeat "lives"; decremented by send_hb() and reset to
        # HB_LIVENESS by on_heartbeat().
        self.curr_liveness = HB_LIVENESS
        self.stream = stream

        self.send_uniqueid()

        self.hb_out_timer = PeriodicCallback(self.send_hb, HB_INTERVAL)
        self.hb_out_timer.start()

    def send_uniqueid(self):
        """Called on W_READY from worker.

        Sends unique id to worker.
        """

        logging.debug('Broker to Worker {} sending unique id: {}'.format(
            self.service, self.id))

        msg = [self.id, EMPTY_FRAME, self.proto, W_READY, self.id]
        self.stream.send_multipart(msg)

    def send_hb(self):
        """Called on every HB_INTERVAL.

        Decrements the current liveness by one.

        Sends heartbeat to worker.
        """

        self.curr_liveness -= 1
        logging.debug('Broker to Worker {} HB tick, current liveness: {}'.format(
            self.service, self.curr_liveness))

        msg = [self.id, EMPTY_FRAME, self.proto, W_HEARTBEAT]
        self.stream.send_multipart(msg)

    def on_heartbeat(self):
        """Called when a heartbeat message from the worker was received.

        Sets current liveness to HB_LIVENESS.
        """

        logging.debug('Received HB from worker {}'.format(self.service))

        self.curr_liveness = HB_LIVENESS

    def is_alive(self):
        """Returns True when the worker is considered alive.
        """

        return self.curr_liveness > 0

    def shutdown(self):
        """Cleanup worker.

        Stops timer.
        """

        logging.info('Shutting down worker {}'.format(self.service))

        self.hb_out_timer.stop()
        self.hb_out_timer = None
        self.stream = None
Esempio n. 39
0
File: server.py Progetto: postsql/cc
class CCServer(skytools.BaseScript):
    """Listens on single ZMQ sockets, dispatches messages to handlers.

    Config::
        ## Parameters for CCServer ##

        # listening socket for this CC instance
        cc-socket = tcp://127.0.0.1:22632

        # zmq customization:
        #zmq_nthreads = 1
        #zmq_linger = 500
        #zmq_hwm = 100

        #zmq_tcp_keepalive = 1
        #zmq_tcp_keepalive_intvl = 15
        #zmq_tcp_keepalive_idle = 240
        #zmq_tcp_keepalive_cnt = 4

    .. note:: This class uses Python 2 syntax (``except ..., d``) and the
       legacy pyzmq ``zmq.XREP`` / ``zmq.HWM`` constants.
    """
    extra_ini = """
    Extra segments::

        # map req prefix to handler segment
        [routes]
        log = h:locallog

        # segment for specific handler
        [h:locallog]
        handler = cc.handler.locallogger
    """

    log = skytools.getLogger('CCServer')

    # Default log format strings; overridable via config keys of the same name.
    cf_defaults = {
        'logfmt_console': LOG.fmt,
        'logfmt_file': LOG.fmt,
        'logfmt_console_verbose': LOG.fmt_v,
        'logfmt_file_verbose': LOG.fmt_v,
        'logdatefmt_console': LOG.datefmt,
        'logdatefmt_file': LOG.datefmt,
        'logdatefmt_console_verbose': LOG.datefmt_v,
        'logdatefmt_file_verbose': LOG.datefmt_v,
    }

    __version__ = __version__

    # Statistics verbosity: 0 = off, 1 = totals, >1 = per-destination counters.
    stat_level = 1

    # ZMQ tunables; class-level defaults, refreshed from config in reload().
    zmq_nthreads = 1
    zmq_linger = 500
    zmq_hwm = 100
    zmq_rcvbuf = 0  # means no change
    zmq_sndbuf = 0  # means no change

    zmq_tcp_keepalive = 1
    zmq_tcp_keepalive_intvl = 15
    zmq_tcp_keepalive_idle = 4 * 60
    zmq_tcp_keepalive_cnt = 4

    def reload(self):
        # Re-read all ZMQ tunables from config, keeping the current value as
        # the default when the key is absent.
        super(CCServer, self).reload()

        self.zmq_nthreads = self.cf.getint('zmq_nthreads', self.zmq_nthreads)
        self.zmq_hwm = self.cf.getint('zmq_hwm', self.zmq_hwm)
        self.zmq_linger = self.cf.getint('zmq_linger', self.zmq_linger)
        self.zmq_rcvbuf = hsize_to_bytes(
            self.cf.get('zmq_rcvbuf', str(self.zmq_rcvbuf)))
        self.zmq_sndbuf = hsize_to_bytes(
            self.cf.get('zmq_sndbuf', str(self.zmq_sndbuf)))

        self.zmq_tcp_keepalive = self.cf.getint('zmq_tcp_keepalive',
                                                self.zmq_tcp_keepalive)
        self.zmq_tcp_keepalive_intvl = self.cf.getint(
            'zmq_tcp_keepalive_intvl', self.zmq_tcp_keepalive_intvl)
        self.zmq_tcp_keepalive_idle = self.cf.getint(
            'zmq_tcp_keepalive_idle', self.zmq_tcp_keepalive_idle)
        self.zmq_tcp_keepalive_cnt = self.cf.getint('zmq_tcp_keepalive_cnt',
                                                    self.zmq_tcp_keepalive_cnt)

    def print_ini(self):
        # Also show the [routes]/handler-segment documentation in --ini output.
        super(CCServer, self).print_ini()

        self._print_ini_frag(self.extra_ini)

    def startup(self):
        """Setup sockets and handlers."""

        super(CCServer, self).startup()

        self.log.info("C&C server version %s starting up..", self.__version__)

        self.xtx = CryptoContext(self.cf)
        self.zctx = zmq.Context(self.zmq_nthreads)
        self.ioloop = IOLoop.instance()

        self.local_url = self.cf.get('cc-socket')

        self.cur_role = self.cf.get('cc-role', 'insecure')
        if self.cur_role == 'insecure':
            self.log.warning(
                'CC is running in insecure mode, please add "cc-role = local" or "cc-role = remote" option to config'
            )

        self.stat_level = self.cf.getint('cc-stats', 1)
        if self.stat_level < 1:
            self.log.warning('CC statistics level too low: %d',
                             self.stat_level)

        # initialize local listen socket
        # (zmq.XREP is the legacy alias for zmq.ROUTER)
        s = self.zctx.socket(zmq.XREP)
        s.setsockopt(zmq.LINGER, self.zmq_linger)
        s.setsockopt(zmq.HWM, self.zmq_hwm)
        if self.zmq_rcvbuf > 0:
            s.setsockopt(zmq.RCVBUF, self.zmq_rcvbuf)
        if self.zmq_sndbuf > 0:
            s.setsockopt(zmq.SNDBUF, self.zmq_sndbuf)
        if self.zmq_tcp_keepalive > 0:
            # TCP keepalive options exist only in newer libzmq/pyzmq builds.
            if getattr(zmq, 'TCP_KEEPALIVE', -1) > 0:
                s.setsockopt(zmq.TCP_KEEPALIVE, self.zmq_tcp_keepalive)
                s.setsockopt(zmq.TCP_KEEPALIVE_INTVL,
                             self.zmq_tcp_keepalive_intvl)
                s.setsockopt(zmq.TCP_KEEPALIVE_IDLE,
                             self.zmq_tcp_keepalive_idle)
                s.setsockopt(zmq.TCP_KEEPALIVE_CNT, self.zmq_tcp_keepalive_cnt)
            else:
                self.log.info("TCP_KEEPALIVE not available")
        s.bind(self.local_url)
        self.local = CCStream(s, self.ioloop, qmaxsize=self.zmq_hwm)
        self.local.on_recv(self.handle_cc_recv)

        # Build route-prefix -> handler-list mapping from the [routes] section.
        self.handlers = {}
        self.routes = {}
        rcf = skytools.Config('routes', self.cf.filename, ignore_defs=True)
        for r, hnames in rcf.cf.items('routes'):
            self.log.info('New route: %s = %s', r, hnames)
            for hname in [hn.strip() for hn in hnames.split(',')]:
                h = self.get_handler(hname)
                self.add_handler(r, h)

        # Periodically flush accumulated stats (every 30 s).
        self.stimer = PeriodicCallback(self.send_stats, 30 * 1000, self.ioloop)
        self.stimer.start()

    def send_stats(self):
        if self.stat_level == 0:
            return

        # make sure we have something to send
        self.stat_increase('count', 0)

        # combine our stats with global stats
        self.combine_stats(reset_stats())

        super(CCServer, self).send_stats()

    def combine_stats(self, other):
        # Merge another stats dict into our own counters.
        for k, v in other.items():
            self.stat_inc(k, v)

    def get_handler(self, hname):
        # Return a cached handler instance, constructing it on first use from
        # its config segment.
        if hname in self.handlers:
            h = self.handlers[hname]
        else:
            hcf = self.cf.clone(hname)

            # renamed option: plugin->handler
            htype = hcf.get('plugin', '?')
            if htype == '?':
                htype = hcf.get('handler')

            cls = cc_handler_lookup(htype, self.cur_role)
            h = cls(hname, hcf, self)
            self.handlers[hname] = h
        return h

    def add_handler(self, rname, handler):
        """Add route to handler"""

        # '*' matches everything (empty prefix tuple); otherwise the route is
        # the dotted prefix split into a tuple.
        if rname == '*':
            r = ()
        else:
            r = tuple(rname.split('.'))
        self.log.debug('New route for handler: %r -> %s', r, handler.hname)
        rhandlers = self.routes.setdefault(r, [])
        rhandlers.append(handler)

    def handle_cc_recv(self, zmsg):
        """Got message from client, pick handler."""

        start = time.time()
        self.stat_inc('count')
        self.log.trace('got msg: %r', zmsg)
        try:
            cmsg = CCMessage(zmsg)
        except:
            self.log.exception('Invalid CC message')
            self.stat_increase('count.invalid')
            return

        # NOTE(review): if cmsg.get_dest() or get_size() raises, `dst`/`size`
        # are unbound in the except branch and the stats code below -- the
        # crash log and stat updates would then fail with NameError. Confirm
        # whether those calls can actually raise.
        try:
            dst = cmsg.get_dest()
            size = cmsg.get_size()
            route = tuple(dst.split('.'))

            # find and run all handlers that match
            # (every prefix of the destination, from () up to the full route)
            cnt = 0
            for n in range(0, 1 + len(route)):
                p = route[:n]
                for h in self.routes.get(p, []):
                    self.log.trace('calling handler %s', h.hname)
                    h.handle_msg(cmsg)
                    cnt += 1
            if cnt == 0:
                self.log.warning('dropping msg, no route: %s', dst)
                stat = 'dropped'
            else:
                stat = 'ok'

        except Exception:
            self.log.exception('crashed, dropping msg: %s', dst)
            stat = 'crashed'

        # update stats
        taken = time.time() - start
        self.stat_inc('bytes', size)
        self.stat_inc('seconds', taken)
        self.stat_inc('count.%s' % stat)
        self.stat_inc('bytes.%s' % stat, size)
        self.stat_inc('seconds.%s' % stat, taken)
        if self.stat_level > 1:
            self.stat_inc('count.%s.msg.%s' % (stat, dst))
            self.stat_inc('bytes.%s.msg.%s' % (stat, dst), size)
            self.stat_inc('seconds.%s.msg.%s' % (stat, dst), taken)

    def work(self):
        """Default work loop simply runs ioloop."""
        self.set_single_loop(1)
        self.log.info('Starting IOLoop')
        try:
            self.ioloop.start()
        except zmq.ZMQError, d:
            # ZMQ gets surprised by EINTR
            if d.errno == errno.EINTR:
                return 1
            raise
Esempio n. 40
0
class CloneServer(object):
    """Clone-pattern server: serves state snapshots and republishes updates.

    .. note:: Python 2 code (``print`` statements, implicit ``str`` frames).
    """

    # Our server is defined by these properties
    ctx = None                  # Context wrapper
    kvmap = None                # Key-value store
    loop = None                 # IOLoop reactor
    port = None                 # Main port we're working on
    sequence = 0                # How many updates we're at
    snapshot = None             # Handle snapshot requests
    publisher = None            # Publish updates to clients
    collector = None            # Collect updates from clients

    def __init__(self, port=5556):
        self.port = port
        self.ctx = zmq.Context()
        self.kvmap = {}
        self.loop = IOLoop.instance()

        # Set up our clone server sockets
        # (snapshot on `port`, publisher on port+1, collector on port+2)
        self.snapshot  = self.ctx.socket(zmq.ROUTER)
        self.publisher = self.ctx.socket(zmq.PUB)
        self.collector = self.ctx.socket(zmq.PULL)
        self.snapshot.bind("tcp://*:%d" % self.port)
        self.publisher.bind("tcp://*:%d" % (self.port + 1))
        self.collector.bind("tcp://*:%d" % (self.port + 2))

        # Wrap sockets in ZMQStreams for IOLoop handlers
        self.snapshot = ZMQStream(self.snapshot)
        self.publisher = ZMQStream(self.publisher)
        self.collector = ZMQStream(self.collector)

        # Register our handlers with reactor
        self.snapshot.on_recv(self.handle_snapshot)
        self.collector.on_recv(self.handle_collect)
        # TTL expiry sweep, once per second (started in start()).
        self.flush_callback = PeriodicCallback(self.flush_ttl, 1000)

        # basic log formatting:
        logging.basicConfig(format="%(asctime)s %(message)s", datefmt="%Y-%m-%d %H:%M:%S",
                level=logging.INFO)


    def start(self):
        # Run reactor until process interrupted
        self.flush_callback.start()
        try:
            self.loop.start()
        except KeyboardInterrupt:
            pass

    def handle_snapshot(self, msg):
        """snapshot requests"""
        # Expected frames: [identity, "ICANHAZ?", subtree]; anything else is a
        # protocol violation and stops the loop.
        if len(msg) != 3 or msg[1] != "ICANHAZ?":
            print "E: bad request, aborting"
            dump(msg)
            self.loop.stop()
            return
        identity, request, subtree = msg
        if subtree:
            # Send state snapshot to client
            route = Route(self.snapshot, identity, subtree)

            # For each entry in kvmap, send kvmsg to client
            for k,v in self.kvmap.items():
                send_single(k,v,route)

            # Now send END message with sequence number
            logging.info("I: Sending state shapshot=%d" % self.sequence)
            self.snapshot.send(identity, zmq.SNDMORE)
            kvmsg = KVMsg(self.sequence)
            kvmsg.key = "KTHXBAI"
            kvmsg.body = subtree
            kvmsg.send(self.snapshot)

    def handle_collect(self, msg):
        """Collect updates from clients"""
        # Stamp each update with the next sequence number, re-publish it, and
        # store it in the local map.
        kvmsg = KVMsg.from_msg(msg)
        self.sequence += 1
        kvmsg.sequence = self.sequence
        kvmsg.send(self.publisher)
        # Convert a relative ttl (seconds) into an absolute expiry timestamp.
        ttl = float(kvmsg.get('ttl', 0))
        if ttl:
            kvmsg['ttl'] = time.time() + ttl
        kvmsg.store(self.kvmap)
        logging.info("I: publishing update=%d", self.sequence)

    def flush_ttl(self):
        """Purge ephemeral values that have expired"""
        # NOTE(review): flush_single may delete from kvmap while this
        # iterates; safe on Python 2 where items() returns a list, but would
        # break under Python 3.
        for key,kvmsg in self.kvmap.items():
            self.flush_single(kvmsg)

    def flush_single(self, kvmsg):
        """If key-value pair has expired, delete it and publish the fact
        to listening clients."""
        ttl = float(kvmsg.get('ttl', 0))
        if ttl and ttl <= time.time():
            # Publish an empty-body message so subscribers drop the key too.
            kvmsg.body = ""
            self.sequence += 1
            kvmsg.sequence = self.sequence
            kvmsg.send(self.publisher)
            del self.kvmap[kvmsg.key]
            logging.info("I: publishing delete=%d", self.sequence)
Esempio n. 41
0
class CloneServer(object):
    """Clone-pattern server: serves state snapshots and republishes updates.

    Listens on three sockets derived from a base port:
    ROUTER (snapshot requests) on `port`, PUB (updates out) on `port + 1`,
    PULL (updates in) on `port + 2`.
    """

    # Our server is defined by these properties
    ctx = None  # Context wrapper
    kvmap = None  # Key-value store
    loop = None  # IOLoop reactor
    port = None  # Main port we're working on
    sequence = 0  # How many updates we're at
    snapshot = None  # Handle snapshot requests
    publisher = None  # Publish updates to clients
    collector = None  # Collect updates from clients

    def __init__(self, port=5556):
        self.port = port
        self.ctx = zmq.Context()
        self.kvmap = {}
        self.loop = IOLoop.instance()

        # Set up our clone server sockets
        self.snapshot = self.ctx.socket(zmq.ROUTER)
        self.publisher = self.ctx.socket(zmq.PUB)
        self.collector = self.ctx.socket(zmq.PULL)
        self.snapshot.bind(f"tcp://*:{self.port:d}")
        self.publisher.bind(f"tcp://*:{self.port + 1:d}")
        self.collector.bind(f"tcp://*:{self.port + 2:d}")

        # Wrap sockets in ZMQStreams for IOLoop handlers
        self.snapshot = ZMQStream(self.snapshot)
        self.publisher = ZMQStream(self.publisher)
        self.collector = ZMQStream(self.collector)

        # Register our handlers with reactor
        self.snapshot.on_recv(self.handle_snapshot)
        self.collector.on_recv(self.handle_collect)
        # TTL expiry sweep, once per second (started in start()).
        self.flush_callback = PeriodicCallback(self.flush_ttl, 1000)

        # basic log formatting:
        logging.basicConfig(format="%(asctime)s %(message)s",
                            datefmt="%Y-%m-%d %H:%M:%S",
                            level=logging.INFO)

    def start(self):
        # Run reactor until process interrupted
        self.flush_callback.start()
        try:
            self.loop.start()
        except KeyboardInterrupt:
            pass

    def handle_snapshot(self, msg):
        """snapshot requests"""
        # Expected frames: [identity, b"ICANHAZ?", subtree]; anything else is
        # a protocol violation and stops the loop.
        if len(msg) != 3 or msg[1] != b"ICANHAZ?":
            print("E: bad request, aborting")
            dump(msg)
            self.loop.stop()
            return
        [identity, request, subtree] = msg
        if subtree:
            # Send state snapshot to client
            route = Route(self.snapshot, identity, subtree)

            # For each entry in kvmap, send kvmsg to client
            for k, v in self.kvmap.items():
                send_single(k, v, route)

            # Now send END message with sequence number
            # (fixed log typo: "shapshot" -> "snapshot")
            logging.info(f"I: Sending state snapshot={self.sequence:d}")
            self.snapshot.send(identity, zmq.SNDMORE)
            kvmsg = KVMsg(self.sequence)
            kvmsg.key = b"KTHXBAI"
            kvmsg.body = subtree
            kvmsg.send(self.snapshot)

    def handle_collect(self, msg):
        """Collect updates from clients"""
        # Stamp each update with the next sequence number, re-publish it, and
        # store it in the local map.
        kvmsg = KVMsg.from_msg(msg)
        self.sequence += 1
        kvmsg.sequence = self.sequence
        kvmsg.send(self.publisher)
        # Convert a relative ttl (seconds) into an absolute expiry timestamp.
        ttl = float(kvmsg.get(b'ttl', 0))
        if ttl:
            kvmsg[b'ttl'] = b'%f' % (time.time() + ttl)
        kvmsg.store(self.kvmap)
        logging.info(f"I: publishing update={self.sequence:d}")

    def flush_ttl(self):
        """Purge ephemeral values that have expired"""
        # used list() to exhaust the iterator before deleting from the dict
        for key, kvmsg in list(self.kvmap.items()):
            self.flush_single(kvmsg)

    def flush_single(self, kvmsg):
        """If key-value pair has expired, delete it and publish the fact
        to listening clients."""
        ttl = float(kvmsg.get(b'ttl', 0))
        if ttl and ttl <= time.time():
            # Publish an empty-body message so subscribers drop the key too.
            kvmsg.body = b""
            self.sequence += 1
            kvmsg.sequence = self.sequence
            kvmsg.send(self.publisher)
            del self.kvmap[kvmsg.key]
            logging.info(f"I: publishing delete={self.sequence:d}")
Esempio n. 42
0
class MNWorker(MN_object):
    """Class for the MN worker side.

    Thin encapsulation of a zmq.DEALER socket.
    Provides a send method with optional timeout parameter.

    Will use a timeout to indicate a broker failure.

    :param context:     the context to use for socket creation.
    :type context:      zmq.Context
    :param endpoint:    endpoint to connect to.
    :type endpoint:     str
    :param service:     the name of the service we support.
    :type service:      byte-string
    :param worker_type: the type of this worker.
    :type worker_type:  byte-string
    :param address:     the address this worker announces.
    :type address:      byte-string
    :param protocols:   the protocols this worker supports.
    :type protocols:    byte-string
    """

    _proto_version = b'MNPW01'  # worker protocol version

    def __init__(self, context, endpoint, service, worker_type, address,
                 protocols):
        """Initialize the MNWorker and connect to the broker.
        """
        self.context = context
        self.endpoint = endpoint
        self.service = service
        self.type = worker_type
        self.address = address
        self.protocols = protocols
        self.envelope = None  # reply envelope of the request in progress
        self.HB_RETRIES = HB_RETRIES
        self.HB_INTERVAL = HB_INTERVAL
        self._data = {}
        self.stream = None
        self._tmo = None
        self.timed_out = False
        self.need_handshake = True
        self.connected = False
        self.ticker = None
        self._delayed_cb = None
        self._create_stream()
        _LOG.info("Worker initialized and can be found at '%s'" % endpoint)
        return

    def _create_stream(self):
        """Helper to create the socket and the stream.

        Connects a DEALER socket to the broker, registers the receive
        callback, announces readiness and starts the heartbeat ticker.
        """
        socket = self.context.socket(zmq.DEALER)
        ioloop = IOLoop.instance()
        self.stream = ZMQStream(socket, ioloop)
        self.stream.on_recv(self._on_message)
        self.stream.socket.setsockopt(zmq.LINGER, 0)
        self.stream.connect(self.endpoint)
        self.ticker = PeriodicCallback(self._tick, self.HB_INTERVAL)
        self._send_ready()
        self.ticker.start()
        return

    def _send_ready(self):
        """Helper method to prepare and send the workers READY message.
        """
        _LOG.debug("Informing broker I am ready")
        ready_msg = [
            b'', WORKER_PROTO, MSG_READY, self.service, self.type,
            self.address, self.protocols
        ]
        if self.stream.closed():
            # The stream is already gone: tear down and bail out instead
            # of sending on it (shutdown() sets self.stream to None, so
            # falling through would raise AttributeError).
            self.shutdown()
            return
        self.stream.send_multipart(ready_msg)
        self.curr_retries = self.HB_RETRIES
        return

    def _tick(self):
        """Method called every HB_INTERVAL milliseconds.

        Sends a heartbeat; once all retries are used up the connection is
        considered dead, torn down and a reconnect is scheduled.
        """
        self.curr_retries -= 1
        self.send_hb()
        if self.curr_retries >= 0:
            return
        # connection seems to be dead
        self.shutdown()
        # try to recreate it after one more heartbeat interval
        self._delayed_cb = DelayedCallback(self._create_stream,
                                           self.HB_INTERVAL)
        self._delayed_cb.start()
        return

    def send_hb(self):
        """Construct and send HB message to broker.
        """
        _LOG.debug("Sending heartbeat")
        msg = [b'', WORKER_PROTO, MSG_HEARTBEAT]
        if self.stream.closed():
            # Same guard as in _send_ready: never send on a torn-down
            # stream.
            self.shutdown()
            return
        self.stream.send_multipart(msg)
        return

    def shutdown(self):
        """Method to deactivate the worker connection completely.

        Will delete the stream and the underlying socket.
        """
        if self.ticker:
            self.ticker.stop()
            self.ticker = None
        if not self.stream:
            return
        self.stream.socket.close()
        self.stream.close()
        self.stream = None
        self.timed_out = False
        self.need_handshake = True
        self.connected = False
        return

    def reply(self, msg):
        """Send the given reply using the envelope of the last request.

        :param msg:    full message to send.
        :type msg:     can either be a byte-string or a list of byte-strings

        :raises ConnectionNotReadyError: when the handshake has not taken
            place yet, or when the stream died before the reply could be
            sent.
        """
        if self.need_handshake:
            raise ConnectionNotReadyError()
        to_send = self.envelope
        self.envelope = None
        if isinstance(msg, list):
            to_send.extend(msg)
        else:
            to_send.append(msg)
        if self.stream.closed():
            # Connection died under us; signal the caller explicitly
            # instead of crashing with AttributeError on the None'd stream.
            self.shutdown()
            raise ConnectionNotReadyError()
        self.stream.send_multipart(to_send)
        return

    def _on_message(self, msg):
        """Helper method called on message receive.

        :param msg:    a list w/ the message parts
        :type msg:     a list of byte-strings
        """
        _LOG.debug("Received: %s." % msg)
        # 1st part is empty
        msg.pop(0)
        # 2nd part is protocol version
        proto = msg.pop(0)
        if proto != WORKER_PROTO:
            # ignore message from not supported protocol
            return
        # 3rd part is message type
        msg_type = msg.pop(0)
        # XXX: hardcoded message types!
        # any message resets the retries counter
        self.need_handshake = False
        self.curr_retries = self.HB_RETRIES
        if msg_type == MSG_DISCONNECT:  # disconnect
            _LOG.info("Broker wants us to disconnect.")
            self.curr_retries = 0  # reconnect will be triggered by hb timer
        elif msg_type == MSG_QUERY:  # request
            # remaining parts are the user message
            _LOG.debug("Received new request: %s." % msg)
            envelope, msg = split_address(msg)
            envelope.append(b'')
            envelope = [b'', WORKER_PROTO, MSG_REPLY] + envelope  # reply
            self.envelope = envelope
            self.on_request(msg)
        else:
            # invalid message type -- ignored
            _LOG.debug('ignoring message with invalid id')
        return

    def on_request(self, msg):
        """Public method called when a request arrived.

        :param msg:    a list w/ the message parts
        :type msg:     a list of byte-strings

        Must be overloaded to provide support for various services!
        """
        pass
Esempio n. 43
0
class CryptoTransportLayer(TransportLayer):
    """Encrypted transport layer for a market node.

    Extends ``TransportLayer`` with an elliptic-curve identity
    (secp256k1 via ``ec.ECC``), DHT-based peer discovery, optional
    Bitmessage connectivity, and settings persisted in the supplied
    database handle.

    NOTE(review): this class is Python 2 code (``print`` statements,
    ``iteritems``, ``str.decode('hex')``).
    """
    def __init__(self,
                 my_ip,
                 my_port,
                 market_id,
                 db,
                 bm_user=None,
                 bm_pass=None,
                 bm_port=None,
                 seed_mode=0,
                 dev_mode=False):

        # Per-market logger; silence per-request INFO noise from requests.
        self.log = logging.getLogger('[%s] %s' %
                                     (market_id, self.__class__.__name__))
        requests_log = logging.getLogger("requests")
        requests_log.setLevel(logging.WARNING)

        # Connect to database
        self.db = db

        # Only attempt Bitmessage when at least one credential was given.
        self.bitmessage_api = None
        if (bm_user, bm_pass, bm_port) != (None, None, None):
            if not self._connect_to_bitmessage(bm_user, bm_pass, bm_port):
                self.log.info('Bitmessage not installed or started')

        # Bracket IPv6 literals in the URI; inet_pton raises for non-IPv6.
        try:
            socket.inet_pton(socket.AF_INET6, my_ip)
            my_uri = "tcp://[%s]:%s" % (my_ip, my_port)
        except socket.error:
            my_uri = "tcp://%s:%s" % (my_ip, my_port)

        self.market_id = market_id
        self.nick_mapping = {}
        self.uri = my_uri
        self.ip = my_ip
        self.nickname = ""
        self._dev_mode = dev_mode

        # Set up (loads/creates settings and key material: guid, pubkey...)
        self._setup_settings()

        self.dht = DHT(self, self.market_id, self.settings, self.db)

        # self._myself = ec.ECC(pubkey=self.pubkey.decode('hex'),
        #                       privkey=self.secret.decode('hex'),
        #                       curve='secp256k1')

        TransportLayer.__init__(self, market_id, my_ip, my_port, self.guid,
                                self.nickname)

        self.setup_callbacks()
        self.listen(self.pubkey)

        # Seed/dev nodes have stable addresses; everyone else polls for
        # public-IP changes.
        if seed_mode == 0 and not dev_mode:
            self.start_ip_address_checker()

    def setup_callbacks(self):
        """Register the DHT message handlers on this transport."""
        self.add_callbacks([('hello', self._ping),
                            ('findNode', self._find_node),
                            ('findNodeResponse', self._find_node_response),
                            ('store', self._store_value)])

    def start_ip_address_checker(self):
        '''Checks for possible public IP change'''
        # Poll every 5 seconds on the tornado IOLoop.
        self.caller = PeriodicCallback(self._ip_updater_periodic_callback,
                                       5000, ioloop.IOLoop.instance())
        self.caller.start()

    def _ip_updater_periodic_callback(self):
        """Poll icanhazip.com and re-listen if our public IP changed."""
        try:
            r = requests.get('https://icanhazip.com')

            if r and hasattr(r, 'text'):
                ip = r.text
                ip = ip.strip(' \t\n\r')
                if ip != self.ip:
                    self.ip = ip
                    # Rebuild the URI (IPv6 literals need brackets).
                    try:
                        socket.inet_pton(socket.AF_INET6, self.ip)
                        my_uri = 'tcp://[%s]:%s' % (self.ip, self.port)
                    except socket.error:
                        my_uri = 'tcp://%s:%s' % (self.ip, self.port)
                    self.uri = my_uri
                    # Rebind on the new address and re-announce to the DHT.
                    self.stream.close()
                    self.listen(self.pubkey)

                    self.dht._iterativeFind(self.guid, [], 'findNode')
            else:
                self.log.error('Could not get IP')
        except Exception as e:
            self.log.error('[Requests] error: %s' % e)

    def save_peer_to_db(self, peer_tuple):
        """Persist a (uri, pubkey, guid, nickname) peer tuple.

        Rows matching the URI or the GUID are deleted first, so this acts
        as an upsert keyed on either field.
        """
        uri = peer_tuple[0]
        pubkey = peer_tuple[1]
        guid = peer_tuple[2]
        nickname = peer_tuple[3]

        # Update query
        self.db.deleteEntries("peers", {"uri": uri, "guid": guid}, "OR")
        # if len(results) > 0:
        #     self.db.updateEntries("peers", {"id": results[0]['id']}, {"market_id": self.market_id, "uri": uri, "pubkey": pubkey, "guid": guid, "nickname": nickname})
        # else:
        if guid is not None:
            self.db.insertEntry(
                "peers", {
                    "uri": uri,
                    "pubkey": pubkey,
                    "guid": guid,
                    "nickname": nickname,
                    "market_id": self.market_id
                })

    def _connect_to_bitmessage(self, bm_user, bm_pass, bm_port):
        """Attach to a local Bitmessage XML-RPC API.

        Returns the result of a probe call (``add(2, 3)``) on success,
        or False when no instance could be reached.
        """
        # Get bitmessage going
        # First, try to find a local instance
        result = False
        try:
            self.log.info(
                '[_connect_to_bitmessage] Connecting to Bitmessage on port %s'
                % bm_port)
            self.bitmessage_api = xmlrpclib.ServerProxy(
                "http://{}:{}@localhost:{}/".format(bm_user, bm_pass, bm_port),
                verbose=0)
            # Probe call: proves the API is alive and credentials work.
            result = self.bitmessage_api.add(2, 3)
            self.log.info(
                "[_connect_to_bitmessage] Bitmessage API is live".format(
                    result))
        # If we failed, fall back to starting our own
        except Exception as e:
            self.log.info(
                "Failed to connect to bitmessage instance: {}".format(e))
            self.bitmessage_api = None
            # self._log.info("Spawning internal bitmessage instance")
            # # Add bitmessage submodule path
            # sys.path.insert(0, os.path.join(
            #     os.path.dirname(__file__), '..', 'pybitmessage', 'src'))
            # import bitmessagemain as bitmessage
            # bitmessage.logger.setLevel(logging.WARNING)
            # bitmessage_instance = bitmessage.Main()
            # bitmessage_instance.start(daemon=True)
            # bminfo = bitmessage_instance.getApiAddress()
            # if bminfo is not None:
            #     self._log.info("Started bitmessage daemon at %s:%s".format(
            #         bminfo['address'], bminfo['port']))
            #     bitmessage_api = xmlrpclib.ServerProxy("http://{}:{}@{}:{}/".format(
            #         bm_user, bm_pass, bminfo['address'], bminfo['port']))
            # else:
            #     self._log.info("Failed to start bitmessage dameon")
            #     self._bitmessage_api = None
        return result

    def _checkok(self, msg):
        """Callback that simply logs a successful check."""
        self.log.info('Check ok')

    def get_guid(self):
        """Return this node's GUID."""
        return self.guid

    def get_dht(self):
        """Return the DHT instance backing this transport."""
        return self.dht

    def get_bitmessage_api(self):
        """Return the Bitmessage XML-RPC proxy (None if unavailable)."""
        return self.bitmessage_api

    def get_market_id(self):
        """Return the market id this transport serves."""
        return self.market_id

    # def get_myself(self):
    #     return self._myself

    def _ping(self, msg):
        """Handle an incoming 'hello' ping (currently log-only)."""
        self.log.info('Pinged %s ' % json.dumps(msg, ensure_ascii=False))
        #
        # pinger = CryptoPeerConnection(self, msg['uri'], msg['pubkey'], msg['senderGUID'])
        # pinger.send_raw(json.dumps(
        #     {"type": "hello_response",
        #      "senderGUID": self.guid,
        #      "uri": self.uri,
        #      "senderNick": self.nickname,
        #      "pubkey": self.pubkey,
        #     }))

    def _store_value(self, msg):
        """Forward a DHT 'store' message to the DHT layer."""
        self.dht._on_storeValue(msg)

    def _find_node(self, msg):
        """Forward a DHT 'findNode' message to the DHT layer."""
        self.dht.on_find_node(msg)

    def _find_node_response(self, msg):
        """Forward a DHT 'findNodeResponse' message to the DHT layer."""
        self.dht.on_findNodeResponse(self, msg)

    def _setup_settings(self):
        """Load per-market settings from the DB, creating any missing
        pieces (PGP keypair, Bitcoin keypair, Bitmessage address,
        nickname) on first run, then cache them on the instance.
        """
        try:
            self.settings = self.db.selectEntries(
                "settings", {"market_id": self.market_id})
        except (OperationalError, DatabaseError) as e:
            print e
            raise SystemExit(
                "database file %s corrupt or empty - cannot continue" %
                self.db.db_path)

        if len(self.settings) == 0:
            # First run for this market: seed a default settings row.
            self.settings = {"market_id": self.market_id, "welcome": "enable"}
            self.db.insertEntry("settings", self.settings)
        else:
            self.settings = self.settings[0]

        # Generate PGP key during initial setup or if previous PGP gen failed
        if not ('PGPPubKey' in self.settings and self.settings["PGPPubKey"]):
            try:
                self.log.info(
                    'Generating PGP keypair. This may take several minutes...')
                print 'Generating PGP keypair. This may take several minutes...'
                gpg = gnupg.GPG()
                input_data = gpg.gen_key_input(
                    key_type="RSA",
                    key_length=2048,
                    name_email='*****@*****.**',
                    name_comment="Autogenerated by Open Bazaar",
                    passphrase="P@ssw0rd")
                assert input_data is not None
                key = gpg.gen_key(input_data)
                assert key is not None

                pubkey_text = gpg.export_keys(key.fingerprint)
                newsettings = {
                    "PGPPubKey": pubkey_text,
                    "PGPPubkeyFingerprint": key.fingerprint
                }
                self.db.updateEntries("settings",
                                      {"market_id": self.market_id},
                                      newsettings)
                self.settings.update(newsettings)

                self.log.info('PGP keypair generated.')
            except Exception as e:
                self.log.error("Encountered a problem with GPG: %s" % e)
                raise SystemExit("Encountered a problem with GPG: %s" % e)

        if not ('pubkey' in self.settings and self.settings['pubkey']):
            # Generate Bitcoin keypair
            self._generate_new_keypair()

        if not ('bitmessage' in self.settings and self.settings['bitmessage']):
            # Generate Bitmessage address
            if self.bitmessage_api is not None:
                self._generate_new_bitmessage_address()

        if not ('nickname' in self.settings and self.settings['nickname']):
            newsettings = {'nickname': 'Default'}
            self.db.updateEntries('settings', {"market_id": self.market_id},
                                  newsettings)
            self.settings.update(newsettings)

        # Cache frequently-used settings as plain attributes.
        self.nickname = self.settings[
            'nickname'] if 'nickname' in self.settings else ""
        self.secret = self.settings[
            'secret'] if 'secret' in self.settings else ""
        self.pubkey = self.settings[
            'pubkey'] if 'pubkey' in self.settings else ""
        self.privkey = self.settings.get('privkey')
        self.btc_pubkey = privkey_to_pubkey(self.privkey)
        self.guid = self.settings['guid'] if 'guid' in self.settings else ""
        self.sin = self.settings['sin'] if 'sin' in self.settings else ""
        self.bitmessage = self.settings[
            'bitmessage'] if 'bitmessage' in self.settings else ""

        # Build our ECC identity from the cached hex-encoded key material.
        self._myself = ec.ECC(pubkey=pubkey_to_pyelliptic(
            self.pubkey).decode('hex'),
                              raw_privkey=self.secret.decode('hex'),
                              curve='secp256k1')

        self.log.debug('Retrieved Settings: \n%s', pformat(self.settings))

    def _generate_new_keypair(self):
        """Generate and persist the node's Bitcoin keypair, GUID and SIN."""
        secret = str(random.randrange(2**256))
        # NOTE(review): 'random' is not a CSPRNG; key material should come
        # from a cryptographically secure source -- confirm upstream intent.
        self.secret = hashlib.sha256(secret).hexdigest()
        self.pubkey = privtopub(self.secret)
        self.privkey = random_key()
        print 'PRIVATE KEY: ', self.privkey
        self.btc_pubkey = privtopub(self.privkey)
        print 'PUBLIC KEY: ', self.btc_pubkey

        # Generate SIN (GUID = RIPEMD160(SHA256(pubkey)))
        sha_hash = hashlib.sha256()
        sha_hash.update(self.pubkey)
        ripe_hash = hashlib.new('ripemd160')
        ripe_hash.update(sha_hash.digest())

        self.guid = ripe_hash.digest().encode('hex')
        # NOTE(review): '\x0F\x02%s' is concatenated, not %-formatted -- the
        # literal '%s' bytes end up inside the SIN payload; verify intended.
        self.sin = obelisk.EncodeBase58Check('\x0F\x02%s' + ripe_hash.digest())

        newsettings = {
            "secret": self.secret,
            "pubkey": self.pubkey,
            "privkey": self.privkey,
            "guid": self.guid,
            "sin": self.sin
        }
        self.db.updateEntries("settings", {"market_id": self.market_id},
                              newsettings)
        self.settings.update(newsettings)

    def _generate_new_bitmessage_address(self):
        """Create and persist a random Bitmessage address for this node."""
        # Use the guid generated previously as the key
        self.bitmessage = self.bitmessage_api.createRandomAddress(
            self.guid.encode('base64'), False, 1.05, 1.1111)
        newsettings = {"bitmessage": self.bitmessage}
        self.db.updateEntries("settings", {"market_id": self.market_id},
                              newsettings)
        self.settings.update(newsettings)

    def join_network(self, seed_peers=[], callback=lambda msg: None):
        """Connect to seed servers and persisted peers, then search for
        our own GUID to populate the routing table.

        NOTE(review): mutable default argument 'seed_peers=[]' is shared
        across calls and is mutated in the loop below -- confirm callers
        always pass their own list.
        """
        self.log.info('Joining network')

        known_peers = []

        # Connect up through seed servers
        for idx, seed in enumerate(seed_peers):
            try:
                socket.inet_pton(socket.AF_INET6, seed)
                seed_peers[idx] = "tcp://[%s]:12345" % seed
            except socket.error:
                seed_peers[idx] = "tcp://%s:12345" % seed

        # Connect to persisted peers
        db_peers = self.get_past_peers()

        known_peers = list(set(seed_peers)) + list(set(db_peers))

        print 'known_peers', known_peers

        self.connect_to_peers(known_peers)

        # Populate routing table by searching for self
        if len(known_peers) > 0:
            self.search_for_my_node()

        if callback is not None:
            callback('Joined')

    def get_past_peers(self):
        """Return the URIs of peers previously persisted for this market."""
        peers = []
        result = self.db.selectEntries("peers", {"market_id": self.market_id})
        for peer in result:
            peers.append(peer['uri'])
        return peers

    def search_for_my_node(self):
        """Run an iterative DHT findNode on our own GUID."""
        print 'Searching for myself'
        self.dht._iterativeFind(self.guid, self.dht.knownNodes, 'findNode')

    def connect_to_peers(self, known_peers):
        """Spawn one thread per peer URI to add it to the DHT."""
        for known_peer in known_peers:
            t = Thread(target=self.dht.add_peer, args=(
                self,
                known_peer,
            ))
            t.start()

    def get_crypto_peer(self,
                        guid=None,
                        uri=None,
                        pubkey=None,
                        nickname=None,
                        callback=None):
        """Build a CryptoPeerConnection for a remote node.

        Returns None when *guid* is our own GUID (no self-connections).
        """
        if guid == self.guid:
            self.log.error('Cannot get CryptoPeerConnection for your own node')
            return

        self.log.debug('Getting CryptoPeerConnection' +
                       '\nGUID:%s\nURI:%s\nPubkey:%s\nNickname:%s' %
                       (guid, uri, pubkey, nickname))

        return connection.CryptoPeerConnection(self,
                                               uri,
                                               pubkey,
                                               guid=guid,
                                               nickname=nickname,
                                               callback=callback)

    def addCryptoPeer(self, peer_to_add):
        """Add *peer_to_add* to the DHT, replacing any outdated record
        that shares its GUID, pubkey or address.
        """
        foundOutdatedPeer = False
        for idx, peer in enumerate(self.dht.activePeers):

            if (peer.address, peer.guid, peer.pub) == \
               (peer_to_add.address, peer_to_add.guid, peer_to_add.pub):
                self.log.info('Found existing peer, not adding.')
                return

            if peer.guid == peer_to_add.guid or \
               peer.pub == peer_to_add.pub or \
               peer.address == peer_to_add.address:

                foundOutdatedPeer = True
                self.log.info('Found an outdated peer')

                # Update existing peer
                # NOTE(review): this writes self.activePeers while the loop
                # iterates self.dht.activePeers -- possibly meant
                # self.dht.activePeers[idx]; confirm against DHT code.
                self.activePeers[idx] = peer_to_add
                self.dht.add_peer(self, peer_to_add.address, peer_to_add.pub,
                                  peer_to_add.guid, peer_to_add.nickname)

        if not foundOutdatedPeer and peer_to_add.guid != self.guid:
            self.log.info('Adding crypto peer at %s' % peer_to_add.nickname)
            self.dht.add_peer(self, peer_to_add.address, peer_to_add.pub,
                              peer_to_add.guid, peer_to_add.nickname)

    def get_profile(self):
        """Return this node's public profile (uri, pubkey, nickname and
        the hex pubkeys of known peers), refreshing settings from the DB.
        """
        peers = {}

        self.settings = self.db.selectEntries("settings",
                                              {"market_id": self.market_id})[0]
        for uri, peer in self.peers.iteritems():
            if peer.pub:
                peers[uri] = peer.pub.encode('hex')
        return {
            'uri': self.uri,
            'pub': self._myself.get_pubkey().encode('hex'),
            'nickname': self.nickname,
            'peers': peers
        }

    def respond_pubkey_if_mine(self, nickname, ident_pubkey):
        """If *ident_pubkey* is ours, broadcast a signed pubkey response."""
        if ident_pubkey != self.pubkey:
            self.log.info("Public key does not match your identity")
            return

        # Return signed pubkey
        pubkey = self._myself.pubkey
        ec_key = obelisk.EllipticCurveKey()
        ec_key.set_secret(self.secret)
        digest = obelisk.Hash(pubkey)
        signature = ec_key.sign(digest)

        # Send array of nickname, pubkey, signature to transport layer
        self.send(proto_response_pubkey(nickname, pubkey, signature))

    def pubkey_exists(self, pub):
        """Return True when *pub* matches a known peer's public key."""
        for uri, peer in self.peers.iteritems():
            self.log.info('PEER: %s Pub: %s' %
                          (peer.pub.encode('hex'), pub.encode('hex')))
            if peer.pub.encode('hex') == pub.encode('hex'):
                return True

        return False

    def create_peer(self, uri, pub, node_guid):
        """Create a CryptoPeerConnection for *uri* and fire 'peer'
        callbacks. *pub* is hex-encoded; decoded here if present.
        """
        if pub:
            pub = pub.decode('hex')

        # Create the peer if public key is not already in the peer list
        # if not self.pubkey_exists(pub):
        self.peers[uri] = connection.CryptoPeerConnection(
            self, uri, pub, node_guid)

        # Call 'peer' callbacks on listeners
        self.trigger_callbacks('peer', self.peers[uri])

        # else:
        #    print 'Pub Key is already in peer list'

    def send(self, data, send_to=None, callback=lambda msg: None):
        """Send *data* to one peer (by GUID in *send_to*) or broadcast
        it to all active peers when *send_to* is None.
        """
        self.log.debug("Outgoing Data: %s %s" % (data, send_to))

        # Directed message
        if send_to is not None:

            # Prefer the routing table; fall back to the active-peer list.
            peer = self.dht.routingTable.getContact(send_to)
            if not peer:
                for activePeer in self.dht.activePeers:
                    if activePeer.guid == send_to:
                        peer = activePeer
                        break

            # peer = CryptoPeerConnection(msg['uri'])
            if peer:
                self.log.debug('Directed Data (%s): %s' % (send_to, data))
                try:
                    peer.send(data, callback=callback)
                except Exception as e:
                    self.log.error('Not sending message directly to peer %s' %
                                   e)
            else:
                self.log.error('No peer found')

        else:
            # FindKey and then send

            for peer in self.dht.activePeers:
                try:
                    peer = self.dht.routingTable.getContact(peer.guid)
                    # Stamp sender identity onto the outgoing payload.
                    data['senderGUID'] = self.guid
                    data['pubkey'] = self.pubkey

                    def cb(msg):
                        self.log.debug('Message Back: \n%s' % pformat(msg))

                    peer.send(data, cb)

                except:
                    self.log.info("Error sending over peer!")
                    traceback.print_exc()

    def send_enc(self, uri, msg):
        """Send *msg* to the peer at *uri*, encrypted when we hold the
        peer's public key, otherwise as clear JSON.
        """
        peer = self.peers[uri]
        pub = peer.pub

        # Now send a hello message to the peer
        if pub:
            self.log.info("Sending encrypted [%s] message to %s" %
                          (msg['type'], uri))
            peer.send(msg)
        else:
            # Will send clear profile on initial if no pub
            self.log.info("Sending unencrypted [%s] message to %s" %
                          (msg['type'], uri))
            self.peers[uri].send_raw(json.dumps(msg))

    def _init_peer(self, msg):
        """Create or refresh a peer record from a handshake message and
        reply to 'hello_request's with our profile.
        """
        uri = msg['uri']
        pub = msg.get('pub')
        nickname = msg.get('nickname')
        msg_type = msg.get('type')
        guid = msg['guid']

        if not self.valid_peer_uri(uri):
            self.log.error("Invalid Peer: %s " % uri)
            return

        if uri not in self.peers:
            # Unknown peer
            self.log.info('Add New Peer: %s' % uri)
            self.create_peer(uri, pub, guid)

            if not msg_type:
                self.send_enc(uri, hello_request(self.get_profile()))
            elif msg_type == 'hello_request':
                self.send_enc(uri, hello_response(self.get_profile()))

        else:
            # Known peer
            if pub:
                # test if we have to update the pubkey
                if not self.peers[uri].pub:
                    self.log.info("Setting public key for seed node")
                    self.peers[uri].pub = pub.decode('hex')
                    self.trigger_callbacks('peer', self.peers[uri])

                if self.peers[uri].pub != pub.decode('hex'):
                    self.log.info("Updating public key for node")
                    self.peers[uri].nickname = nickname
                    self.peers[uri].pub = pub.decode('hex')

                    self.trigger_callbacks('peer', self.peers[uri])

            if msg_type == 'hello_request':
                # reply only if necessary
                self.send_enc(uri, hello_response(self.get_profile()))

    def _on_message(self, msg):
        """Dispatch a decoded message: record the sender in the DHT and
        fire the registered callbacks for its 'type' on a worker thread.
        """
        # here goes the application callbacks
        # we get a "clean" msg which is a dict holding whatever
        # self.log.info("[On Message] Data received: %s" % msg)

        pubkey = msg.get('pubkey')
        uri = msg.get('uri')
        ip = urlparse(uri).hostname
        port = urlparse(uri).port
        guid = msg.get('senderGUID')
        # Cap nickname length to keep logs/storage bounded.
        nickname = msg.get('senderNick')[:120]

        self.dht.add_known_node((ip, port, guid, nickname))
        self.log.info('ON MESSAGE %s' % json.dumps(msg, ensure_ascii=False))

        self.dht.add_peer(self, uri, pubkey, guid, nickname)
        self.log.debug('Callbacks %s' % self.callbacks)
        t = Thread(target=self.trigger_callbacks, args=(
            msg['type'],
            msg,
        ))
        t.start()

    def _on_raw_message(self, serialized):
        """Decompress, decrypt and signature-check a raw wire message,
        then hand the resulting dict to _on_message.
        """
        try:

            # Decompress message
            serialized = zlib.decompress(serialized)

            msg = json.loads(serialized)
            self.log.info("Message Received [%s]" % msg.get('type', 'unknown'))

            if msg.get('type') is None:
                # Typeless envelope: hex-encoded encrypted payload + sig.
                data = msg.get('data').decode('hex')
                sig = msg.get('sig').decode('hex')

                try:
                    cryptor = makePrivCryptor(self.secret)

                    try:
                        data = cryptor.decrypt(data)
                    except Exception as e:
                        self.log.info('Exception: %s' % e)

                    self.log.debug('Signature: %s' % sig.encode('hex'))
                    self.log.debug('Signed Data: %s' % data)

                    # Check signature
                    data_json = json.loads(data)
                    sigCryptor = makePubCryptor(data_json['pubkey'])
                    if sigCryptor.verify(sig, data):
                        self.log.info('Verified')
                    else:
                        # NOTE(review): verification failure is logged but
                        # the message is still processed (return commented).
                        self.log.error(
                            'Message signature could not be verified %s' % msg)
                        # return

                    msg = json.loads(data)
                    self.log.debug('Message Data %s ' % msg)
                except Exception as e:
                    self.log.error('Could not decrypt message properly %s' % e)

        except ValueError:
            # Not zlib/JSON -- try treating it as an encrypted blob.
            try:
                # Encrypted?
                try:
                    msg = self._myself.decrypt(serialized)
                    msg = json.loads(msg)

                    self.log.info("Decrypted Message [%s]" %
                                  msg.get('type', 'unknown'))
                except:
                    self.log.error("Could not decrypt message: %s" % msg)
                    return
            except:
                self.log.error('Message probably sent using incorrect pubkey')

                return

        if msg.get('type') is not None:
            self._on_message(msg)
        else:
            self.log.error('Received a message with no type')

    def shutdown(self):
        """Best-effort teardown: base transport, then Bitmessage proxy."""
        print "CryptoTransportLayer.shutdown()!"
        try:
            TransportLayer.shutdown(self)
            print "CryptoTransportLayer.shutdown(): ZMQ sockets destroyed."
        except Exception as e:
            self.log.error("Transport shutdown error: " + e.message)

        print "Notice: explicit DHT Shutdown not implemented."

        try:
            self.bitmessage_api.close()
        except Exception as e:
            # might not even be open, not much more we can do on our way out if exception thrown here.
            self.log.error(
                "Could not shutdown bitmessage_api's ServerProxy. " +
                e.message)
Esempio n. 44
0
class MDPWorker(object):
    """Class for the MDP worker side.

    Thin encapsulation of a zmq.DEALER socket.
    Provides a send method with optional timeout parameter.

    Will use a timeout to indicate a broker failure.
    """

    _proto_version = b'MDPW01'

    # TODO: integrate that into API
    HB_INTERVAL = 1000  # in milliseconds
    HB_LIVENESS = 3  # HBs to miss before connection counts as dead

    def __init__(self, context, endpoint, service):
        """Initialize the MDPWorker.

        :param context:  the zmq context to create the socket from.
        :param endpoint: the broker endpoint to connect to.
        :param service:  byte-string with the service name.
        """
        self.context = context
        self.endpoint = endpoint
        self.service = service
        self.stream = None
        self._tmo = None
        self.need_handshake = True
        self.ticker = None
        self._delayed_cb = None
        # FIX: these attributes were previously created only inside
        # _on_message()/shutdown(); calling reply() before the first request
        # (or inspecting the flags on a fresh instance) raised AttributeError.
        self.envelope = None
        self.timed_out = False
        self.connected = False
        self._create_stream()
        return

    def _create_stream(self):
        """Helper to create the socket and the stream.

        Also sends the initial READY handshake and starts the heartbeat
        ticker.
        """
        socket = self.context.socket(zmq.DEALER)
        ioloop = IOLoop.instance()
        self.stream = ZMQStream(socket, ioloop)
        self.stream.on_recv(self._on_message)
        self.stream.socket.setsockopt(zmq.LINGER, 0)
        self.stream.connect(self.endpoint)
        self.ticker = PeriodicCallback(self._tick, self.HB_INTERVAL)
        self._send_ready()
        self.ticker.start()
        return

    def _send_ready(self):
        """Helper method to prepare and send the workers READY message.

        Resets the liveness counter to HB_LIVENESS.
        """
        ready_msg = [b'', self._proto_version, b'\x01', self.service]
        self.stream.send_multipart(ready_msg)
        self.curr_liveness = self.HB_LIVENESS
        return

    def _tick(self):
        """Method called every HB_INTERVAL milliseconds.

        Decrements liveness, sends a heartbeat, and when liveness runs out
        tears the connection down and schedules a reconnect in 5 seconds.
        """
        self.curr_liveness -= 1
        self.send_hb()
        if self.curr_liveness >= 0:
            return
        # ouch, connection seems to be dead
        self.shutdown()
        # try to recreate it
        self._delayed_cb = DelayedCallback(self._create_stream, 5000)
        self._delayed_cb.start()
        return

    def send_hb(self):
        """Construct and send HB message to broker."""
        msg = [b'', self._proto_version, b'\x04']
        self.stream.send_multipart(msg)
        return

    def shutdown(self):
        """Method to deactivate the worker connection completely.

        Will delete the stream and the underlying socket.
        """
        if self.ticker:
            self.ticker.stop()
            self.ticker = None
        if not self.stream:
            return
        self.stream.socket.close()
        self.stream.close()
        self.stream = None
        self.timed_out = False
        self.need_handshake = True
        self.connected = False
        return

    def reply(self, msg):
        """Send the given message.

        msg can either be a byte-string or a list of byte-strings.

        Uses the envelope captured from the last request to route the reply
        back through the broker; the envelope is consumed (reset to None).
        """
        ##         if self.need_handshake:
        ##             raise ConnectionNotReadyError()
        # prepare full message
        to_send = self.envelope
        self.envelope = None
        if isinstance(msg, list):
            to_send.extend(msg)
        else:
            to_send.append(msg)
        self.stream.send_multipart(to_send)
        return

    def _on_message(self, msg):
        """Helper method called on message receive.

        msg is a list w/ the message parts
        """
        # 1st part is empty
        msg.pop(0)
        # 2nd part is protocol version
        # TODO: version check
        proto = msg.pop(0)
        # 3rd part is message type
        msg_type = msg.pop(0)
        # XXX: hardcoded message types!
        # any message resets the liveness counter
        self.need_handshake = False
        self.curr_liveness = self.HB_LIVENESS
        if msg_type == b'\x05':  # disconnect
            self.curr_liveness = 0  # reconnect will be triggered by hb timer
        elif msg_type == b'\x02':  # request
            # remaining parts are the user message
            envelope, msg = split_address(msg)
            envelope.append(b'')
            envelope = [b'', self._proto_version, b'\x03'] + envelope  # REPLY
            self.envelope = envelope
            self.on_request(msg)
        else:
            # invalid message
            # ignored
            pass
        return

    def on_request(self, msg):
        """Public method called when a request arrived.

        Must be overloaded!
        """
        pass
Esempio n. 45
0
class CloneServer(object):
    """Binary-Star clone server: replicated key-value store with TTL support.

    One of a primary/backup pair. The master applies client updates to its
    kvmap and publishes them; the slave queues pending updates and mirrors
    the master via a subscriber socket, fetching a state snapshot on demand.
    """

    # Our server is defined by these properties
    ctx = None                  # Context wrapper
    kvmap = None                # Key-value store
    bstar = None                # Binary Star
    sequence = 0                # How many updates so far
    port = None                 # Main port we're working on
    peer = None                 # Main port of our peer
    publisher = None            # Publish updates and hugz
    collector = None            # Collect updates from clients
    subscriber = None           # Get updates from peer
    pending = None              # Pending updates from client
    primary = False             # True if we're primary
    master = False              # True if we're master
    slave = False               # True if we're slave

    def __init__(self, primary=True, ports=(5556,5566)):
        self.primary = primary
        if primary:
            self.port, self.peer = ports
            frontend = "tcp://*:5003"
            backend  = "tcp://localhost:5004"
            self.kvmap = {}
        else:
            self.peer, self.port = ports
            frontend = "tcp://*:5004"
            backend  = "tcp://localhost:5003"

        self.ctx = zmq.Context.instance()
        self.pending = []
        self.bstar = BinaryStar(primary, frontend, backend)

        self.bstar.register_voter("tcp://*:%i" % self.port, zmq.ROUTER, self.handle_snapshot)

        # Set up our clone server sockets
        self.publisher = self.ctx.socket(zmq.PUB)
        self.collector = self.ctx.socket(zmq.SUB)
        self.collector.setsockopt(zmq.SUBSCRIBE, b'')
        self.publisher.bind("tcp://*:%d" % (self.port + 1))
        self.collector.bind("tcp://*:%d" % (self.port + 2))

        # Set up our own clone client interface to peer
        self.subscriber = self.ctx.socket(zmq.SUB)
        self.subscriber.setsockopt(zmq.SUBSCRIBE, b'')
        self.subscriber.connect("tcp://localhost:%d" % (self.peer + 1))

        # Register state change handlers
        self.bstar.master_callback = self.become_master
        self.bstar.slave_callback = self.become_slave

        # Wrap sockets in ZMQStreams for IOLoop handlers
        self.publisher = ZMQStream(self.publisher)
        self.subscriber = ZMQStream(self.subscriber)
        self.collector = ZMQStream(self.collector)

        # Register our handlers with reactor
        self.collector.on_recv(self.handle_collect)
        self.flush_callback = PeriodicCallback(self.flush_ttl, 1000)
        self.hugz_callback = PeriodicCallback(self.send_hugz, 1000)

        # basic log formatting:
        logging.basicConfig(format="%(asctime)s %(message)s", datefmt="%Y-%m-%d %H:%M:%S",
                level=logging.INFO)

    def start(self):
        """Start periodic callbacks and run the Binary Star reactor."""
        # start periodic callbacks
        self.flush_callback.start()
        self.hugz_callback.start()
        # Run bstar reactor until process interrupted
        try:
            self.bstar.start()
        except KeyboardInterrupt:
            pass

    def handle_snapshot(self, socket, msg):
        """snapshot requests"""
        if msg[1] != "ICANHAZ?" or len(msg) != 3:
            logging.error("E: bad request, aborting")
            dump(msg)
            self.bstar.loop.stop()
            return
        identity, request = msg[:2]
        if len(msg) >= 3:
            subtree = msg[2]
            # Send state snapshot to client
            route = Route(socket, identity, subtree)

            # For each entry in kvmap, send kvmsg to client
            for k,v in self.kvmap.items():
                send_single(k,v,route)

            # Now send END message with sequence number
            logging.info("I: Sending state shapshot=%d" % self.sequence)
            socket.send(identity, zmq.SNDMORE)
            kvmsg = KVMsg(self.sequence)
            kvmsg.key = "KTHXBAI"
            kvmsg.body = subtree
            kvmsg.send(socket)

    def handle_collect(self, msg):
        """Collect updates from clients

        If we're master, we apply these to the kvmap
        If we're slave, or unsure, we queue them on our pending list
        """
        kvmsg = KVMsg.from_msg(msg)
        if self.master:
            self.sequence += 1
            kvmsg.sequence = self.sequence
            kvmsg.send(self.publisher)
            ttl = kvmsg.get('ttl')
            if ttl is not None:
                kvmsg['ttl'] = time.time() + ttl
            kvmsg.store(self.kvmap)
            logging.info("I: publishing update=%d", self.sequence)
        else:
            # If we already got message from master, drop it, else
            # hold on pending list
            if not self.was_pending(kvmsg):
                self.pending.append(kvmsg)

    def was_pending(self, kvmsg):
        """If message was already on pending list, remove and return True.
        Else return False.
        """
        found = False
        for idx, held in enumerate(self.pending):
            if held.uuid == kvmsg.uuid:
                found = True
                break
        if found:
            self.pending.pop(idx)
        return found

    def flush_ttl(self):
        """Purge ephemeral values that have expired"""
        if self.kvmap:
            # FIX: iterate over a snapshot -- flush_single() deletes from
            # self.kvmap, and mutating a dict while iterating its live
            # items() view is an error on Python 3.
            for key, kvmsg in list(self.kvmap.items()):
                self.flush_single(kvmsg)

    def flush_single(self, kvmsg):
        """If key-value pair has expired, delete it and publish the fact
        to listening clients."""
        # FIX: only entries that actually carry a ttl can expire. The old
        # check ``kvmsg.get('ttl', 0) <= time.time()`` treated the missing-ttl
        # default of 0 as "expired long ago" and deleted every non-ephemeral
        # entry on the first flush.
        ttl = kvmsg.get('ttl', 0)
        if ttl and ttl <= time.time():
            kvmsg.body = ""
            self.sequence += 1
            kvmsg.sequence = self.sequence
            kvmsg.send(self.publisher)
            del self.kvmap[kvmsg.key]
            logging.info("I: publishing delete=%d", self.sequence)

    def send_hugz(self):
        """Send hugz to anyone listening on the publisher socket"""
        kvmsg = KVMsg(self.sequence)
        kvmsg.key = "HUGZ"
        kvmsg.body = ""
        kvmsg.send(self.publisher)

    # ---------------------------------------------------------------------
    # State change handlers

    def become_master(self):
        """We're becoming master

        The backup server applies its pending list to its own hash table,
        and then starts to process state snapshot requests.
        """
        self.master = True
        self.slave = False
        # stop receiving subscriber updates while we are master
        self.subscriber.stop_on_recv()

        # Apply pending list to own kvmap
        while self.pending:
            kvmsg = self.pending.pop(0)
            self.sequence += 1
            kvmsg.sequence = self.sequence
            kvmsg.store(self.kvmap)
            logging.info ("I: publishing pending=%d", self.sequence)


    def become_slave(self):
        """We're becoming slave"""
        # clear kvmap
        self.kvmap = None
        self.master = False
        self.slave = True
        self.subscriber.on_recv(self.handle_subscriber)

    def handle_subscriber(self, msg):
        """Collect updates from peer (master)
        We're always slave when we get these updates
        """
        if self.master:
            logging.warn("received subscriber message, but we are master %s", msg)
            return

        # Get state snapshot if necessary
        if self.kvmap is None:
            self.kvmap = {}
            snapshot = self.ctx.socket(zmq.DEALER)
            snapshot.linger = 0
            snapshot.connect("tcp://localhost:%i" % self.peer)

            logging.info ("I: asking for snapshot from: tcp://localhost:%d",
                        self.peer)
            snapshot.send_multipart(["ICANHAZ?", ''])
            while True:
                try:
                    kvmsg = KVMsg.recv(snapshot)
                except KeyboardInterrupt:
                    # Interrupted
                    self.bstar.loop.stop()
                    return
                if kvmsg.key == "KTHXBAI":
                    self.sequence = kvmsg.sequence
                    break          # Done
                kvmsg.store(self.kvmap)

            logging.info ("I: received snapshot=%d", self.sequence)

        # Find and remove update off pending list
        kvmsg = KVMsg.from_msg(msg)
        # update integer ttl -> timestamp
        ttl = kvmsg.get('ttl')
        if ttl is not None:
            kvmsg['ttl'] = time.time() + ttl

        if kvmsg.key != "HUGZ":
            if not self.was_pending(kvmsg):
                # If master update came before client update, flip it
                # around, store master update (with sequence) on pending
                # list and use to clear client update when it comes later
                self.pending.append(kvmsg)

            # If update is more recent than our kvmap, apply it
            if (kvmsg.sequence > self.sequence):
                self.sequence = kvmsg.sequence
                kvmsg.store(self.kvmap)
                logging.info ("I: received update=%d", self.sequence)
Esempio n. 46
0
class Master(object):
    """Crawl master: hands requests to workers and collects their results.

    Wires up three channels over ZMQ: a SUB stream receiving processed
    responses from workers, a PUSH stream distributing requests, and a
    ServerMessenger for control messages. Two periodic callbacks keep the
    request pipeline full and trigger reloads.
    """

    def __init__(self,
                 frontier,
                 data_in_sock='ipc:///tmp/robot-data-w2m.sock',
                 data_out_sock='ipc:///tmp/robot-data-m2w.sock',
                 msg_in_sock='ipc:///tmp/robot-msg-w2m.sock',
                 msg_out_sock='ipc:///tmp/robot-msg-m2w.sock',
                 io_loop=None):
        self.identity = 'master:%s:%s' % (socket.gethostname(), os.getpid())

        ctx = zmq.Context()
        self._io_loop = io_loop or IOLoop.instance()

        # Incoming channel: processed responses from workers.
        self._in_socket = ctx.socket(zmq.SUB)
        self._in_socket.setsockopt(zmq.SUBSCRIBE, '')
        self._in_socket.bind(data_in_sock)
        self._in_stream = ZMQStream(self._in_socket, io_loop)

        # Outgoing channel: requests pushed to workers.
        self._out_socket = ctx.socket(zmq.PUSH)
        self._out_socket.bind(data_out_sock)
        self._out_stream = ZMQStream(self._out_socket, io_loop)

        self._online_workers = set()
        self._running = False

        # Periodic jobs: keep the pipeline fed, and reload state.
        self._updater = PeriodicCallback(self._send_next, 100, io_loop=io_loop)
        self._reloader = PeriodicCallback(self.reload, 1000, io_loop=io_loop)

        self.frontier = frontier
        self.messenger = ServerMessenger(msg_in_sock, msg_out_sock, ctx,
                                         io_loop)

    def start(self):
        """Begin processing: control messages, responses, periodic jobs."""
        logging.info('[%s] starting', self.identity)
        self.messenger.add_callback(CTRL_MSG_WORKER, self._on_worker_msg)
        self.messenger.start()

        self._in_stream.on_recv(self._on_receive_processed)
        self._updater.start()
        self._reloader.start()
        self._running = True

    def stop(self):
        """Halt periodic callbacks and the control messenger."""
        self._running = False
        self._reloader.stop()
        self._updater.stop()
        self.messenger.stop()

    def close(self):
        """Release all streams and sockets."""
        self._in_stream.close()
        self._in_socket.close()
        self._out_stream.close()
        self._out_socket.close()
        self.messenger.close()

    def reload(self):
        """Hook for periodic state refresh; no-op by default."""
        pass

    def _on_worker_msg(self, msg):
        """Track workers announcing themselves online."""
        if msg.data == CTRL_MSG_WORKER_ONLINE:
            self._online_workers.add(msg.identity)
            logging.info('[%s] append [%s]', self.identity, msg.identity)
            self._send_next()

    def _send_next(self):
        """Top up the outgoing queue to ~4 pending requests per worker."""
        if not self._running:
            return

        worker_count = len(self._online_workers)
        if worker_count == 0:
            return

        while self._out_stream._send_queue.qsize() < worker_count * 4:
            request = self.frontier.get_next_request()
            if not request:
                break

            msg = RequestMessage(self.identity, request)
            self._out_stream.send_multipart(msg.serialize())
            logging.debug('[%s] send request(%s)', self.identity,
                          request.url)

            self.frontier.reload_request(request)

    def _on_receive_processed(self, zmq_msg):
        """Handle a processed response and immediately refill the pipeline."""
        msg = ResponseMessage.deserialize(zmq_msg)
        request = msg.response.request
        logging.debug('[%s] receive response(%s)', self.identity, request.url)
        self._send_next()
Esempio n. 47
0
class MDPBroker(object):

    """The MDP broker class.

    The broker routes messages from clients to appropriate workers based on the
    requested service.

    This base class defines the overall functionality and the API. Subclasses are
    ment to implement additional features (like logging).

    The broker uses ØMQ XREQ sockets to deal witch clients and workers. These sockets
    are wrapped in pyzmq streams to fit well into IOLoop.

    .. note::

      The workers will *always* be served by the `main_ep` endpoint.

      In a two-endpoint setup clients will be handled via the `opt_ep`
      endpoint.

    :param context:    the context to use for socket creation.
    :type context:     zmq.Context
    :param main_ep:    the primary endpoint for workers and clients.
    :type main_ep:     str
    :param opt_ep:     is an optional 2nd endpoint.
    :type opt_ep:      str
    :param worker_q:   the class to be used for the worker-queue.
    :type worker_q:    class
    """

    CLIENT_PROTO = b'MDPC01'  #: Client protocol identifier
    WORKER_PROTO = b'MDPW01'  #: Worker protocol identifier


    def __init__(self, context, main_ep, opt_ep=None, worker_q=None):
        """Init MDPBroker instance.
        """
        socket = context.socket(zmq.XREP)
        socket.bind(main_ep)
        self.main_stream = ZMQStream(socket)
        self.main_stream.on_recv(self.on_message)
        if opt_ep:
            socket = context.socket(zmq.XREP)
            socket.bind(opt_ep)
            self.client_stream = ZMQStream(socket)
            self.client_stream.on_recv(self.on_message)
        else:
            self.client_stream = self.main_stream
        self._workers = {}
        # services contain the worker queue and the request queue
        self._services = {}
        # FIX: command frames arrive as byte-strings; keys must be bytes to
        # match on Python 3 (identical to str on Python 2).
        self._worker_cmds = { b'\x01': self.on_ready,
                              b'\x03': self.on_reply,
                              b'\x04': self.on_heartbeat,
                              b'\x05': self.on_disconnect,
                              }
        self.hb_check_timer = PeriodicCallback(self.on_timer, HB_INTERVAL)
        self.hb_check_timer.start()
        return

    def register_worker(self, wid, service):
        """Register the worker id and add it to the given service.

        Does nothing if worker is already known.

        :param wid:    the worker id.
        :type wid:     str
        :param service:    the service name.
        :type service:     str

        :rtype: None
        """
        if wid in self._workers:
            return
        self._workers[wid] = WorkerRep(self.WORKER_PROTO, wid, service, self.main_stream)
        if service in self._services:
            wq, wr = self._services[service]
            wq.put(wid)
        else:
            q = ServiceQueue()
            q.put(wid)
            self._services[service] = (q, [])
        return

    def unregister_worker(self, wid):
        """Unregister the worker with the given id.

        If the worker id is not registered, nothing happens.

        Will stop all timers for the worker.

        :param wid:    the worker id.
        :type wid:     str

        :rtype: None
        """
        try:
            wrep = self._workers[wid]
        except KeyError:
            # not registered, ignore
            return
        wrep.shutdown()
        service = wrep.service
        if service in self._services:
            wq, wr = self._services[service]
            wq.remove(wid)
        del self._workers[wid]
        return

    def disconnect(self, wid):
        """Send disconnect command and unregister worker.

        If the worker id is not registered, nothing happens.

        :param wid:    the worker id.
        :type wid:     str

        :rtype: None
        """
        try:
            wrep = self._workers[wid]
        except KeyError:
            # not registered, ignore
            return
        to_send = [ wid, self.WORKER_PROTO, b'\x05' ]
        self.main_stream.send_multipart(to_send)
        self.unregister_worker(wid)
        return

    def client_response(self, rp, service, msg):
        """Package and send reply to client.

        :param rp:       return address stack
        :type rp:        list of str
        :param service:  name of service
        :type service:   str
        :param msg:      message parts
        :type msg:       list of str

        :rtype: None
        """
        to_send = rp[:]
        to_send.extend([b'', self.CLIENT_PROTO, service])
        to_send.extend(msg)
        self.client_stream.send_multipart(to_send)
        return

    def shutdown(self):
        """Shutdown broker.

        Will unregister all workers, stop all timers and ignore all further
        messages.

        .. warning:: The instance MUST not be used after :func:`shutdown` has been called.

        :rtype: None
        """
        if self.client_stream == self.main_stream:
            self.client_stream = None
        self.main_stream.on_recv(None)
        self.main_stream.socket.setsockopt(zmq.LINGER, 0)
        self.main_stream.socket.close()
        self.main_stream.close()
        self.main_stream = None
        if self.client_stream:
            self.client_stream.on_recv(None)
            self.client_stream.socket.setsockopt(zmq.LINGER, 0)
            self.client_stream.socket.close()
            self.client_stream.close()
            self.client_stream = None
        self._workers = {}
        self._services = {}
        return

    def on_timer(self):
        """Method called on timer expiry.

        Checks which workers are dead and unregisters them.

        :rtype: None
        """
        # FIX: snapshot the values -- unregister_worker() deletes entries
        # from self._workers, and mutating the dict while iterating its live
        # values() view is an error on Python 3.
        for wrep in list(self._workers.values()):
            if not wrep.is_alive():
                self.unregister_worker(wrep.id)
        return

    def on_ready(self, rp, msg):
        """Process worker READY command.

        Registers the worker for a service.

        :param rp:  return address stack
        :type rp:   list of str
        :param msg: message parts
        :type msg:  list of str

        :rtype: None
        """
        ret_id = rp[0]
        self.register_worker(ret_id, msg[0])
        return

    def on_reply(self, rp, msg):
        """Process worker REPLY command.

        Route the `msg` to the client given by the address(es) in front of `msg`.

        :param rp:  return address stack
        :type rp:   list of str
        :param msg: message parts
        :type msg:  list of str

        :rtype: None
        """
        ret_id = rp[0]
        wrep = self._workers[ret_id]
        service = wrep.service
        # make worker available again
        try:
            wq, wr = self._services[service]
            cp, msg = split_address(msg)
            self.client_response(cp, service, msg)
            wq.put(wrep.id)
            if wr:
                proto, rp, msg = wr.pop(0)
                self.on_client(proto, rp, msg)
        except KeyError:
            # unknown service
            self.disconnect(ret_id)
        return

    def on_heartbeat(self, rp, msg):
        """Process worker HEARTBEAT command.

        :param rp:  return address stack
        :type rp:   list of str
        :param msg: message parts
        :type msg:  list of str

        :rtype: None
        """
        ret_id = rp[0]
        try:
            worker = self._workers[ret_id]
            if worker.is_alive():
                worker.on_heartbeat()
        except KeyError:
            # ignore HB for unknown worker
            pass
        return

    def on_disconnect(self, rp, msg):
        """Process worker DISCONNECT command.

        Unregisters the worker who sent this message.

        :param rp:  return address stack
        :type rp:   list of str
        :param msg: message parts
        :type msg:  list of str

        :rtype: None
        """
        wid = rp[0]
        self.unregister_worker(wid)
        return

    def on_mmi(self, rp, service, msg):
        """Process MMI request.

        For now only mmi.service is handled.

        :param rp:      return address stack
        :type rp:       list of str
        :param service: the protocol id sent
        :type service:  str
        :param msg:     message parts
        :type msg:      list of str

        :rtype: None
        """
        if service == b'mmi.service':
            s = msg[0]
            ret = b'404'
            for wr in self._workers.values():
                if s == wr.service:
                    ret = b'200'
                    break
            self.client_response(rp, service, [ret])
        else:
            self.client_response(rp, service, [b'501'])
        return

    def on_client(self, proto, rp, msg):
        """Method called on client message.

        Frame 0 of msg is the requested service.
        The remaining frames are the request to forward to the worker.

        .. note::

           If the service is unknown to the broker the message is
           ignored.

        .. note::

           If currently no worker is available for a known service,
           the message is queued for later delivery.

        If a worker is available for the requested service, the
        message is repackaged and sent to the worker. The worker in
        question is removed from the pool of available workers.

        If the service name starts with `mmi.`, the message is passed to
        the internal MMI_ handler.

        .. _MMI: http://rfc.zeromq.org/spec:8

        :param proto: the protocol id sent
        :type proto:  str
        :param rp:    return address stack
        :type rp:     list of str
        :param msg:   message parts
        :type msg:    list of str

        :rtype: None
        """
        service = msg.pop(0)
        if service.startswith(b'mmi.'):
            self.on_mmi(rp, service, msg)
            return
        try:
            wq, wr = self._services[service]
            wid = wq.get()
            if not wid:
                # no worker ready
                # queue message
                msg.insert(0, service)
                wr.append((proto, rp, msg))
                return
            wrep = self._workers[wid]
            to_send = [ wrep.id, b'', self.WORKER_PROTO, b'\x02']
            to_send.extend(rp)
            to_send.append(b'')
            to_send.extend(msg)
            self.main_stream.send_multipart(to_send)
        except KeyError:
            # unknwon service
            # ignore request
            # print() form is valid in both Python 2 and 3 (single argument).
            print('broker has no service "%s"' % service)
        return

    def on_worker(self, proto, rp, msg):
        """Method called on worker message.

        Frame 0 of msg is the command id.
        The remaining frames depend on the command.

        This method determines the command sent by the worker and
        calls the appropriate method. If the command is unknown the
        message is ignored and a DISCONNECT is sent.

        :param proto: the protocol id sent
        :type proto:  str
        :param rp:  return address stack
        :type rp:   list of str
        :param msg: message parts
        :type msg:  list of str

        :rtype: None
        """
        cmd = msg.pop(0)
        if cmd in self._worker_cmds:
            fnc = self._worker_cmds[cmd]
            fnc(rp, msg)
        else:
            # ignore unknown command
            # DISCONNECT worker
            self.disconnect(rp[0])
        return

    def on_message(self, msg):
        """Processes given message.

        Decides what kind of message it is -- client or worker -- and
        calls the appropriate method. If unknown, the message is
        ignored.

        :param msg: message parts
        :type msg:  list of str

        :rtype: None
        """
        rp, msg = split_address(msg)
        # dispatch on first frame after path
        t = msg.pop(0)
        if t.startswith(b'MDPW'):
            self.on_worker(t, rp, msg)
        elif t.startswith(b'MDPC'):
            self.on_client(t, rp, msg)
        else:
            print('Broker unknown Protocol: "%s"' % t)
        return
Esempio n. 48
0
class GlinApp:
    """Main Class for Management

    Coordinates the LED animation loop, the hardware backend and the ZMQ
    messaging: scenes are managed through the collector, every state
    change is broadcast through the publisher, and frames are rendered
    by a PeriodicCallback running on the tornado IOLoop.
    """
    def __init__(self, led_count, hw_backend, port=6606):
        # led_count: number of LEDs (frame buffer rows)
        # hw_backend: hardware driver; must provide connect/disconnect,
        #   switch_on/switch_off, send(buf) and get_max_fps()
        self.ctx = zmq.Context()
        self.led_count = led_count
        self.port = port

        self.loop = IOLoop.instance()
        # render timer; callback_time is re-tuned per scene in _activate_scene
        self.caller = PeriodicCallback(self._on_next_frame, 1000/30)
        self.hw_communication = hw_backend
        self.hw_communication.connect()
        self.zmq_collector = GlinAppZmqCollector(self, self.ctx)
        self.zmq_publisher = GlinAppZmqPublisher(self, self.ctx)

        # server side configuration
        self.config = SimpleNamespace()
        self.config.max_fps = 60

        # current state (somehow client side configuration)
        self.state = SimpleNamespace()
        self.state.animationClasses = []
        self.state.activeSceneId = None
        self.state.activeAnimation = None
        self.state.scenes = {}
        self.state.brightness = 1.0
        self.state.sceneIdCtr = 0
        self.state.mainswitch = True
        self.state.target_fps = 0
        self.state.lastFrameSent = None

    def set_brightness(self, brightness):
        """set general brightness in range 0...1

        Returns a (success, sequence_number, message) tuple like all
        other public mutators of this class.
        """
        brightness = min([1.0, max([brightness, 0.0])]) # enforces range 0 ... 1
        self.state.brightness = brightness
        # brightness is applied in _send_frame, so re-emit the last frame
        # to make the change visible immediately on slow animations
        self._repeat_last_frame()
        sequence_number = self.zmq_publisher.publish_brightness(brightness)
        logging.debug("Set brightness to {brightPercent:05.1f}%".format(brightPercent=brightness*100))
        return (True, sequence_number, "OK")

    def register_animation(self, animation_class):
        """Add a new animation

        Returns the animation id (index into state.animationClasses).
        """
        self.state.animationClasses.append(animation_class)
        return len(self.state.animationClasses) - 1

    def add_scene(self, animation_id, name, color, velocity, config):
        """Add a new scene, returns Scene ID"""
        # check arguments
        if animation_id < 0 or animation_id >= len(self.state.animationClasses):
            err_msg = "Requested to register scene with invalid Animation ID. Out of range."
            logging.info(err_msg)
            return(False, 0, err_msg)
        if self.state.animationClasses[animation_id].check_config(config) is False:
            err_msg = "Requested to register scene with invalid configuration."
            logging.info(err_msg)
            return(False, 0, err_msg)
        self.state.sceneIdCtr += 1
        self.state.scenes[self.state.sceneIdCtr] = Scene(animation_id, name, color, velocity, config)
        sequence_number = self.zmq_publisher.publish_scene_add(self.state.sceneIdCtr, animation_id, name, color, velocity, config)
        logging.debug("Registered new scene.")

        # set this scene as active scene if none is configured yet
        if self.state.activeSceneId is None:
            self.set_scene_active(self.state.sceneIdCtr)
        return (True, sequence_number, "OK")

    def remove_scene(self, scene_id):
        """remove a scene by Scene ID

        The active scene cannot be removed; deactivate it first.
        """
        if self.state.activeSceneId == scene_id:
            err_msg = "Requested to delete scene {sceneNum}, which is currently active. Cannot delete active scene.".format(sceneNum=scene_id)
            logging.info(err_msg)
            return(False, 0, err_msg)
        try:
            del self.state.scenes[scene_id]
            logging.debug("Deleted scene {sceneNum}".format(sceneNum=scene_id))
        except KeyError:
            err_msg = "Requested to delete scene {sceneNum}, which does not exist".format(sceneNum=scene_id)
            logging.info(err_msg)
            return(False, 0, err_msg)
        # if we are here, we deleted a scene, so publish it
        sequence_number = self.zmq_publisher.publish_scene_remove(scene_id)
        logging.debug("Removed scene {sceneNum}".format(sceneNum=scene_id))
        return (True, sequence_number, "OK")

    def set_scene_name(self, scene_id, name):
        """rename a scene by scene ID"""
        if not scene_id in self.state.scenes: # does that scene_id exist?
            err_msg = "Requested to rename scene {sceneNum}, which does not exist".format(sceneNum=scene_id)
            logging.info(err_msg)
            return(False, 0, err_msg)
        self.state.scenes[scene_id] = self.state.scenes[scene_id]._replace(name=name) # TODO: is there a better solution?
        sequence_number = self.zmq_publisher.publish_scene_name(scene_id, name)
        logging.debug("Renamed scene {sceneNum}".format(sceneNum=scene_id))
        return (True, sequence_number, "OK")

    def set_scene_config(self, scene_id, config):
        """reconfigure a scene by scene ID"""
        if not scene_id in self.state.scenes: # does that scene_id exist?
            err_msg = "Requested to reconfigure scene {sceneNum}, which does not exist".format(sceneNum=scene_id)
            logging.info(err_msg)
            return(False, 0, err_msg)
        if scene_id == self.state.activeSceneId:
            pass  # TODO: maybe calculate next frame, esp. if static scene
        self.state.scenes[scene_id] = self.state.scenes[scene_id]._replace(config=config)
        sequence_number = self.zmq_publisher.publish_scene_config(scene_id, config)
        logging.debug("Reconfigured scene {sceneNum}".format(sceneNum=scene_id))
        return (True, sequence_number, "OK")

    def set_scene_color(self, scene_id, color):
        """reconfigure a scene by scene ID

        Changes the stored color; if the scene is currently active, the
        running animation is updated and a frame is rendered right away.
        """
        if not scene_id in self.state.scenes: # does that scene_id exist?
            err_msg = "Requested to recolor scene {sceneNum}, which does not exist".format(sceneNum=scene_id)
            logging.info(err_msg)
            return(False, 0, err_msg)
        self.state.scenes[scene_id] = self.state.scenes[scene_id]._replace(color=color)
        sequence_number = self.zmq_publisher.publish_scene_color(scene_id, color)
        logging.debug("Recolored scene {sceneNum}".format(sceneNum=scene_id))
        if scene_id == self.state.activeSceneId:
            self.state.activeAnimation.set_color(color)
            self._do_next_frame() # TODO: make it more sensible, e.g. call only if static scene
        return (True, sequence_number, "OK")

    def set_scene_velocity(self, scene_id, velocity):
        """reconfigure a scene by scene ID

        Changes the stored velocity; if the scene is currently active,
        the running animation is updated and a frame is rendered now.
        """
        if not scene_id in self.state.scenes: # does that scene_id exist?
            err_msg = "Requested to set velocity on scene {sceneNum}, which does not exist".format(sceneNum=scene_id)
            logging.info(err_msg)
            return(False, 0, err_msg)
        self.state.scenes[scene_id] = self.state.scenes[scene_id]._replace(velocity=velocity)
        sequence_number = self.zmq_publisher.publish_scene_velocity(scene_id, velocity)
        logging.debug("set velocity on scene {sceneNum}".format(sceneNum=scene_id))
        if scene_id == self.state.activeSceneId:
            self.state.activeAnimation.set_velocity(velocity)
            self._do_next_frame() # TODO: make it more sensible, e.g. call only if static scene
        return (True, sequence_number, "OK")

    def set_scene_active(self, scene_id):
        """sets the active scene by scene ID

        Tears down the previous animation (if any) and starts the new
        one, unless the main switch is off.
        """
        if self.state.activeSceneId != scene_id: # do nothing if scene has not changed
            self._deactivate_scene()
            sequence_number = self.zmq_publisher.publish_active_scene(scene_id)
            self.state.activeSceneId = scene_id
            if self.state.mainswitch is True: # activate scene only if we are switched on
                self._activate_scene()
            logging.debug("Set scene {sceneNum} as active scene".format(sceneNum=scene_id))
            return (True, sequence_number, "OK")
        else:
            logging.debug("Scene {sceneNum} already is active scene".format(sceneNum=scene_id))
            return (False, 0, "This already is the activated scene.")

    def set_mainswitch_state(self, state):
        """Turns output on or off. Also turns hardware on ir off"""
        if self.state.mainswitch == state:
            err_msg = "MainSwitch unchanged, already is {sState}".format(sState="On" if state else "Off")
            logging.debug(err_msg) # because nothing changed
            return (False, 0, err_msg) # because nothing changed
        self.state.mainswitch = state
        sequence_number = self.zmq_publisher.publish_mainswitch_state(state)
        logging.debug("MainSwitch toggled, new state is {sState}".format(sState="On" if state else "Off"))
        if state is True:
            self.hw_communication.switch_on()
            self._activate_scene() # reinit scene
        else:
            self._deactivate_scene()
            self.hw_communication.switch_off()
        return (True, sequence_number, "OK")

    def toggle_mainswitch_state(self):
        """Toggles the mainswitch state"""
        return self.set_mainswitch_state(not self.state.mainswitch)

    def _activate_scene(self):
        """Instantiate and start the animation of the active scene.

        Chooses the effective FPS as the minimum of server config,
        animation capability and hardware capability, then (re)starts
        the periodic render callback if FPS > 0 (0 means one-shot).
        """
        if self.state.activeSceneId in self.state.scenes: # is scene_id valid? if not, assume there is no scene configured
            animation_class = self.state.animationClasses[self.state.scenes[self.state.activeSceneId].animation_id]
            self.state.activeAnimation = animation_class()
            target_fps = min(self.config.max_fps, self.state.activeAnimation.get_max_fps(), self.hw_communication.get_max_fps())
            if target_fps < 0:
                target_fps = 0
            self.state.target_fps = target_fps
            logging.debug("Running with {fps} FPS".format(fps=target_fps))
            self.state.activeAnimation.prepare(self.led_count, target_fps)
            self.state.activeAnimation.set_color(self.state.scenes[self.state.activeSceneId].color)
            self.state.activeAnimation.set_velocity(self.state.scenes[self.state.activeSceneId].velocity)
            self.state.activeAnimation.set_config(self.state.scenes[self.state.activeSceneId].config)
            if target_fps > 0:   # 0 FPS means one-shot -> no periodic callback required
                self.caller.callback_time = 1000/target_fps
                self.caller.start()
            self.loop.add_callback_from_signal(self._do_next_frame) # execute once to not have to wait for periodic callback (self.caller), esp. if 0 or low FPS
        else:
            self.state.activeAnimation = None # don't do anything. stuck with last frame.

    def _deactivate_scene(self):
        """Stop and tear down the currently running animation, if any."""
        if not self.state.activeAnimation is None:
            self.caller.stop() # stop rendering new frames
            self.state.activeAnimation.finish()
            self.state.activeAnimation = None

    def _on_next_frame(self):
        """PeriodicCallback target: render and send the next frame."""
        logging.debug("generating next frame")
        self._do_next_frame()

    def _do_next_frame(self):
        """Render one frame of the active animation and push it out.

        Keeps a copy of the frame in state.last_buf so it can be
        re-sent by _repeat_last_frame (e.g. after a brightness change).
        """
        if self.state.activeAnimation:
            buf = np.zeros((self.led_count, 3))
            self.state.activeAnimation.render_next_frame(buf)
            # state.last_buf is created lazily here; _repeat_last_frame
            # guards with hasattr before reading it
            self.state.last_buf = np.copy(buf)
            self._send_frame(buf)
        else:
            logging.debug("app: No Active Animation")

    def _repeat_last_frame(self):
        """Re-send the most recent frame, if it is safe and useful to."""
        # only do something, if there is an active animation, else output is considered to be turned off
        if hasattr(self.state, 'last_buf') and self.state.last_buf is not None and self.state.activeAnimation is not None:
            if self.state.target_fps < self.config.max_fps / 4: # to not overload hwbackend, only resend, if active animation is very slow
                self._send_frame(np.copy(self.state.last_buf))

    def _send_frame(self, buf):
        """Clip to [0,1], apply brightness and hand the frame to hardware."""
        np.clip(buf, 0.0, 1.0, out=buf)
        self.state.lastFrameSent = datetime.datetime.now()
        buf *= self.state.brightness
        self.hw_communication.send(buf)

    def execute(self):
        """Execute Main Loop"""
        try:
            logging.debug("Entering IOLoop")
            self.loop.start()
            logging.debug("Leaving IOLoop")
        except KeyboardInterrupt:
            logging.debug("Leaving IOLoop by KeyboardInterrupt")
        finally:
            self.hw_communication.disconnect()
Esempio n. 49
0
class DkvServer(object):
    """ Dkv Server object

    Distributed key-value server. A BinaryStar pair decides who is
    master: the master applies client updates to its kvmap and
    republishes them; the slave holds them on a pending list until a
    failover promotes it.
    """
    ctx = None                  # Context wrapper

    kvmap = None                # Key-value store
    sequence = 0                # How many updates so far

    #bstar = None
    #router = None               # Router socket used for DKV

    publisher = None            # Publish updates and hugz
    collector = None            # Collect updates from clients
    pending = None              # Pending updates from client

    primary = False             # True if we're primary
    master = False              # True if we're master
    slave = False               # True if we're slave

    _debug = False

    def add_peer(self, peer):
        """Register a peer, keyed by its uuid."""
        log.info('DkvServer: Adding Peer %s.', peer.uuid)

        uuid = peer.uuid
        self.peers[uuid] = peer

    def remove_peer(self, peer):
        """Forget a previously registered peer."""
        log.info('DkvServer: Removing Peer %s.', peer.uuid)

        uuid = peer.uuid
        del self.peers[uuid]

    @property
    def router_endpoint(self):
        return 'tcp://%s:%d' % (self.service_addr, self.port)

    def __init__(self, service_addr='*', port=conf.ports.dkv):
        self.service_addr = service_addr
        self.port = port

        if not self.ctx:
            self.ctx = zmq.Context.instance()
        self.loop = IOLoop.instance()

        # Base init
        self.peers = {}
        self.pending = []

        """TODO Automatic voting mechanism to pick primary"""

        if conf.hostname == 'san0':
            remote_host = 'san1'
            primary = True
        elif conf.hostname == 'san1':
            remote_host = 'san0'
            primary = False
        else:
            # fail fast with a clear message instead of the NameError
            # that referencing remote_host below would otherwise raise
            raise ValueError('unsupported hostname for DKV pairing: %r' % conf.hostname)
        self.remote_host = remote_host
        self.primary = primary

        if primary:
            self.kvmap = {}
            bstar_local_ep = 'tcp://*:%d' % conf.ports.bstar_primary
            bstar_remote_ep = 'tcp://%s:%d' % (remote_host, conf.ports.bstar_secondary)
        else:
            bstar_local_ep = 'tcp://*:%d' % conf.ports.bstar_secondary
            bstar_remote_ep = 'tcp://%s:%d' % (remote_host, conf.ports.bstar_primary)

        # Setup router socket
        #self.router = self.ctx.socket(zmq.ROUTER)
        #self.router.bind(self.router_endpoint)
        #self.router = ZMQStream(self.router)
        #self.router.on_recv_stream(self.handle_snapshot)

        self.bstar = BinaryStar(primary, bstar_local_ep, bstar_remote_ep)
        self.bstar.register_voter(self.router_endpoint,
                                  zmq.ROUTER,
                                  self.handle_snapshot)

        # Register state change handlers
        self.bstar.master_callback = self.become_master
        self.bstar.slave_callback = self.become_slave

        # Set up our dkv server sockets
        self.publisher = self.ctx.socket(zmq.PUB)
        address = self.publisher_endpoint
        log.debug('Binding publisher on %s', address)
        self.publisher.bind(address)
        #self.publisher = ZMQStream(self.publisher)

        self.collector = self.ctx.socket(zmq.SUB)
        self.collector.setsockopt(zmq.SUBSCRIBE, b'')
        #self.collector.setsockopt(zmq.SUBSCRIBE, '/clients/updates')
        address = self.collector_endpoint
        log.debug('Binding collector on %s', address)
        self.collector.bind(address)
        self.collector = ZMQStream(self.collector)
        self.collector.on_recv(self.handle_collect)

        # Register our handlers with reactor
        self.flush_callback = PeriodicCallback(self.flush_ttl, 1000)
        self.hugz_callback = PeriodicCallback(self.send_hugz, 1000)

    @property
    def collector_port(self):
        return conf.ports.dkv_collector

    @property
    def publisher_port(self):
        return conf.ports.dkv_publisher

    @property
    def collector_endpoint(self):
        return 'tcp://%s:%d' % (self.service_addr, conf.ports.dkv_collector)

    @property
    def publisher_endpoint(self):
        return 'tcp://%s:%d' % (self.service_addr, conf.ports.dkv_publisher)

    def start(self, loop=True):
        """Start periodic callbacks and run the bstar reactor."""
        log.debug('DkvServer starting')

        # start periodic callbacks
        self.flush_callback.start()
        self.hugz_callback.start()

        # Start bstar reactor until process interrupted
        self.bstar.start(loop=loop)

        #if loop:
        #    self.loop.start()

    def handle_snapshot(self, socket, msg):
        """snapshot requests"""
        #log.debug('handle_snapshot: Got msg=%s', msg[1:])

        if msg[1] != "ICANHAZ?" or len(msg) != 3:
            log.error("bad request, aborting")
            dump(msg)
            #self.bstar.loop.stop()
            return

        identity, request = msg[:2]

        if len(msg) >= 3:
            subtree = msg[2]
            # Send state snapshot to client
            route = Route(socket, identity, subtree)

            # For each entry in kvmap, send kvmsg to client
            for k, v in self.kvmap.items():
                send_single(k, v, route)

            # Now send END message with sequence number
            log.info("Sending state shapshot=%d", self.sequence)
            socket.send(identity, zmq.SNDMORE)
            kvmsg = KVMsg(self.sequence)
            kvmsg.key = "KTHXBAI"
            kvmsg.body = subtree
            kvmsg.send(socket)

    def handle_collect(self, msg):
        """Collect updates from clients

        If we're master, we apply these to the kvmap
        If we're slave, or unsure, we queue them on our pending list
        """
        if self._debug:
            log.debug('msg=%s', msg)

        #if len(msg) != 5:
        #    log.info('handle_collect: Got bad message %s.', msg)
        #    return

        kvmsg = KVMsg.from_msg(msg)
        if self.master:
            self.sequence += 1
            kvmsg.sequence = self.sequence
            kvmsg.send(self.publisher)
            ttl = kvmsg.get('ttl')
            if ttl:
                # relative ttl from client -> absolute expiry timestamp
                kvmsg['ttl'] = time.time() + ttl
            kvmsg.store(self.kvmap)
            log.info("publishing update=%d", self.sequence)
        else:
            # If we already got message from master, drop it, else
            # hold on pending list
            if not self.was_pending(kvmsg):
                self.pending.append(kvmsg)

    def was_pending(self, kvmsg):
        """If message was already on pending list, remove and return True.
        Else return False.
        """
        found = False
        for idx, held in enumerate(self.pending):
            if held.uuid == kvmsg.uuid:
                found = True
                break
        if found:
            self.pending.pop(idx)
        return found

    def flush_ttl(self):
        """Purge ephemeral values that have expired"""
        if self.kvmap:
            # snapshot the items first: flush_single() deletes expired
            # entries from kvmap, and mutating a dict while iterating
            # its view raises RuntimeError on Python 3
            for key, kvmsg in list(self.kvmap.items()):
                self.flush_single(kvmsg)

    def flush_single(self, kvmsg):
        """If key-value pair has expired, delete it and publish the fact
        to listening clients."""
        ttl = kvmsg.get('ttl')
        if ttl and ttl <= time.time():
            kvmsg.body = ""
            self.sequence += 1
            kvmsg.sequence = self.sequence
            kvmsg.send(self.publisher)
            del self.kvmap[kvmsg.key]
            log.debug("publishing delete=%d", self.sequence)

    def send_hugz(self):
        """Send hugz to anyone listening on the publisher socket"""
        if self._debug:
            log.debug('Sending HUGZ to publisher')

        kvmsg = KVMsg(self.sequence)
        kvmsg.key = "HUGZ"
        kvmsg.body = ""
        kvmsg.send(self.publisher)

    """
    State change handlers
    """

    def become_master(self):
        """We're becoming master

        The backup server applies its pending list to its own hash table,
        and then starts to process state snapshot requests.
        """
        log.info('Becoming master')

        self.master = True
        self.slave = False

        # stop receiving subscriber updates while we are master
        for peer in self.peers.values():
            peer.subscriber.stop_on_recv()

        # Apply pending list to own kvmap
        while self.pending:
            kvmsg = self.pending.pop(0)
            self.sequence += 1
            kvmsg.sequence = self.sequence
            kvmsg.store(self.kvmap)
            log.info("publishing pending=%d", self.sequence)

    def become_slave(self):
        """We're becoming slave"""
        log.info('Becoming slave')

        self.kvmap = None     # clear kvmap

        self.master = False
        self.slave = True

        # start receiving subscriber updates
        for peer in self.peers.values():
            peer.subscriber.on_recv(peer._on_subscriber_recv)

    def handle_subscriber(self, peer, msg):
        """Collect updates from peer (master)
        We're always slave when we get these updates
        """
        if self.master:
            if msg[0] != 'HUGZ':
                log.warn(
                    "Received subscriber message, but we are master msg=%s from_peer=%s", msg, peer.uuid)
                return

        # Get state snapshot if necessary
        if self.kvmap is None:
            self.kvmap = {}

            snapshot = peer.get_snapshot()
            while True:
                try:
                    kvmsg = KVMsg.recv(snapshot)
                except KeyboardInterrupt:
                    #self.bstar.loop.stop()
                    self.loop.stop()
                    return

                if kvmsg.key == "KTHXBAI":
                    self.sequence = kvmsg.sequence
                    break          # Done

                kvmsg.store(self.kvmap)

            log.info("received snapshot=%d", self.sequence)

        ##if not isinstance(msg, (tuple, list)) or len(msg) < 5:
        #if not len(msg) == 5:
        #    log.debug('bad msg=%s', msg)
        #    return

        # Find and remove update off pending list
        kvmsg = KVMsg.from_msg(msg)

        # update integer ttl -> timestamp
        ttl = kvmsg.get('ttl')
        if ttl is not None:
            kvmsg['ttl'] = time.time() + ttl

        if kvmsg.key != "HUGZ":
            if not self.was_pending(kvmsg):
                """ If master update came before client update, flip it
                around, store master update (with sequence) on pending
                list and use to clear client update when it comes later.
                """
                self.pending.append(kvmsg)

            # If update is more recent than our kvmap, apply it
            if (kvmsg.sequence > self.sequence):
                self.sequence = kvmsg.sequence
                kvmsg.store(self.kvmap)
                log.info("received update=%d", self.sequence)
Esempio n. 50
0
class ProxyHandler (BaseProxyHandler):
    """ Simply proxies further

    Optionally pings the remote CC on a periodic timer and evaluates
    the echo responses to detect a dead or slow remote.
    """

    log = skytools.getLogger ('h:ProxyHandler')

    # seconds between echo requests (and the unit for lateness checks)
    ping_tick = 1

    def __init__ (self, hname, hcf, ccscript):
        super(ProxyHandler, self).__init__(hname, hcf, ccscript)

        self.ping_remote = self.cf.getbool ("ping", False)
        if self.ping_remote:
            self.echo_stats = EchoState (self.remote_url)
            self.echo_timer = PeriodicCallback (self.ping, self.ping_tick * 1000, self.ioloop)
            self.echo_timer.start()
            self.log.debug ("will ping %s", self.remote_url)

    def on_recv (self, zmsg):
        """ Got message from remote CC, process it. """
        try:
            # pongs to our pings should come back w/o any routing info
            if self.ping_remote and zmsg[0] == '':
                self.log.trace ("%r", zmsg)
                cmsg = CCMessage (zmsg)
                req = cmsg.get_dest()
                if req == "echo.response":
                    self._recv_pong (cmsg)
                else:
                    self.log.warn ("unknown msg: %s", req)
        except Exception:
            # narrowed from bare `except:` so SystemExit/KeyboardInterrupt
            # still propagate; anything else is logged and the message is
            # proxied regardless (see finally)
            self.log.exception ("crashed")
        finally:
            super(ProxyHandler, self).on_recv(zmsg)

    def _recv_pong (self, cmsg):
        """ Pong received, evaluate it. """

        msg = cmsg.get_payload (self.xtx)
        if not msg: return

        if msg.orig_target != self.remote_url:
            self.log.warn ("unknown pong: %s", msg.orig_target)
            return
        echo = self.echo_stats
        echo.update_pong (msg)

        # round-trip time of this pong
        rtt = echo.time_pong - msg.orig_time
        if msg.orig_time == echo.time_ping:
            self.log.trace ("echo time: %f s (%s)", rtt, self.remote_url)
        elif rtt <= 5 * self.ping_tick:
            self.log.debug ("late pong: %f s (%s)", rtt, self.remote_url)
        else:
            self.log.info ("too late pong: %f s (%s)", rtt, self.remote_url)

    def _send_ping (self):
        """ Send ping to remote CC. """
        msg = EchoRequestMessage(
                target = self.remote_url)
        cmsg = self.xtx.create_cmsg (msg)
        self.stream.send_cmsg (cmsg)
        self.echo_stats.update_ping (msg)
        self.log.trace ("%r", msg)

    def ping (self):
        """ Echo requesting and monitoring. """
        self.log.trace ("")
        # warn when the last pong lags far behind the last ping
        miss = self.echo_stats.time_ping - self.echo_stats.time_pong
        if miss > 5 * self.ping_tick:
            self.log.warn ("no pong from %s for %f s", self.remote_url, miss)
        self._send_ping ()

    def stop (self):
        """ Stop proxying and cancel the echo timer, if one was started. """
        super(ProxyHandler, self).stop()
        self.log.info ("stopping")
        if hasattr (self, "echo_timer"):
            self.echo_timer.stop()
Esempio n. 51
0
class MDPWorker(object):

    """Class for the MDP worker side.

    Thin encapsulation of a zmq.XREQ socket.
    Provides a send method with optional timeout parameter.

    Will use a timeout to indicate a broker failure.
    """

    _proto_version = b'MDPW01'

    # TODO: integrate that into API
    HB_INTERVAL = 1000  # in milliseconds
    HB_LIVENESS = 3    # HBs to miss before connection counts as dead

    def __init__(self, context, endpoint, service):
        """Initialize the MDPWorker.

        context is the zmq context to create the socket from.
        service is a byte-string with the service name.
        """
        self.context = context
        self.endpoint = endpoint
        self.service = service
        self.stream = None
        self._tmo = None
        self.need_handshake = True
        self.ticker = None
        self._delayed_cb = None
        # reply envelope of the request currently being processed;
        # set by _on_message, consumed (and cleared) by reply()
        self.envelope = None
        self._create_stream()
        return

    def _create_stream(self):
        """Helper to create the socket and the stream.
        """
        socket = self.context.socket(zmq.XREQ)
        ioloop = IOLoop.instance()
        self.stream = ZMQStream(socket, ioloop)
        self.stream.on_recv(self._on_message)
        self.stream.socket.setsockopt(zmq.LINGER, 0)
        self.stream.connect(self.endpoint)
        self.ticker = PeriodicCallback(self._tick, self.HB_INTERVAL)
        self._send_ready()
        self.ticker.start()
        return

    def _send_ready(self):
        """Helper method to prepare and send the workers READY message.
        """
        # MDP command ids are single bytes; bytes literals match the
        # broker's b'\x0N' frames on py3 and are identical to str on py2
        ready_msg = [ b'', self._proto_version, b'\x01', self.service ]
        self.stream.send_multipart(ready_msg)
        self.curr_liveness = self.HB_LIVENESS
        return

    def _tick(self):
        """Method called every HB_INTERVAL milliseconds.

        Sends a heartbeat; when liveness is exhausted the connection is
        torn down and recreated after a 5 s delay.
        """
        self.curr_liveness -= 1
##         print('%.3f tick - %d' % (time.time(), self.curr_liveness))
        self.send_hb()
        if self.curr_liveness >= 0:
            return
        print('%.3f lost connection' % time.time())
        # ouch, connection seems to be dead
        self.shutdown()
        # try to recreate it
        self._delayed_cb = DelayedCallback(self._create_stream, 5000)
        self._delayed_cb.start()
        return

    def send_hb(self):
        """Construct and send HB message to broker.
        """
        msg = [ b'', self._proto_version, b'\x04' ]
        self.stream.send_multipart(msg)
        return

    def shutdown(self):
        """Method to deactivate the worker connection completely.

        Will delete the stream and the underlying socket.
        """
        if self.ticker:
            self.ticker.stop()
            self.ticker = None
        if not self.stream:
            return
        self.stream.socket.close()
        self.stream.close()
        self.stream = None
        self.timed_out = False
        self.need_handshake = True
        self.connected = False
        return

    def reply(self, msg):
        """Send the given message.

        msg can either be a byte-string or a list of byte-strings.

        Uses (and clears) the envelope captured from the last request.
        """
##         if self.need_handshake:
##             raise ConnectionNotReadyError()
        # prepare full message
        to_send = self.envelope
        self.envelope = None
        if isinstance(msg, list):
            to_send.extend(msg)
        else:
            to_send.append(msg)
        self.stream.send_multipart(to_send)
        return

    def _on_message(self, msg):
        """Helper method called on message receive.

        msg is a list w/ the message parts
        """
        # 1st part is empty
        msg.pop(0)
        # 2nd part is protocol version
        # TODO: version check
        proto = msg.pop(0)
        # 3nd part is message type
        msg_type = msg.pop(0)
        # XXX: hardcoded message types!
        # any message resets the liveness counter
        self.need_handshake = False
        self.curr_liveness = self.HB_LIVENESS
        if msg_type == b'\x05': # disconnect
            print('    DISC')
            self.curr_liveness = 0 # reconnect will be triggered by hb timer
        elif msg_type == b'\x02': # request
            # remaining parts are the user message
            envelope, msg = split_address(msg)
            envelope.append(b'')
            envelope = [ b'', self._proto_version, b'\x03'] + envelope # REPLY
            self.envelope = envelope
            self.on_request(msg)
        else:
            # invalid message
            # ignored
            pass
        return

    def on_request(self, msg):
        """Public method called when a request arrived.

        Must be overloaded!
        """
        pass
Esempio n. 52
0
class CryptoTransportLayer(TransportLayer):

    def __init__(self, my_ip, my_port, market_id, bm_user=None, bm_pass=None, bm_port=None, seed_mode=0, dev_mode=False):
        """Set up logging, database connection and the Bitmessage API link.

        NOTE(review): this method is garbled in the source -- the Mongo
        URI line below had its credentials redacted and several
        statements (including the ``try:`` that must precede the
        ``except`` clause) were collapsed into it.  Reconstruct from the
        upstream project before relying on this code.
        """

        self._log = logging.getLogger('[%s] %s' % (market_id, self.__class__.__name__))
        # silence noisy third-party request logging
        requests_log = logging.getLogger("requests")
        requests_log.setLevel(logging.WARNING)

        # Connect to database
        # NOTE(review): broken line -- mismatched quotes, redacted
        # credentials, and a merged bitmessage ServerProxy call.
        MONGODB_URI = 'mongodb://*****:*****@localhost:{}/".format(bm_user, bm_pass, bm_port), verbose=0)
            result = self._bitmessage_api.add(2,3)
            self._log.info("Bitmessage test result: {}, API is live".format(result))
        # If we failed, fall back to starting our own
        except Exception as e:
            self._log.info("Failed to connect to bitmessage instance: {}".format(e))
            self._bitmessage_api = None
            # self._log.info("Spawning internal bitmessage instance")
            # # Add bitmessage submodule path
            # sys.path.insert(0, os.path.join(
            #     os.path.dirname(__file__), '..', 'pybitmessage', 'src'))
            # import bitmessagemain as bitmessage
            # bitmessage.logger.setLevel(logging.WARNING)
            # bitmessage_instance = bitmessage.Main()
            # bitmessage_instance.start(daemon=True)
            # bminfo = bitmessage_instance.getApiAddress()
            # if bminfo is not None:
            #     self._log.info("Started bitmessage daemon at %s:%s".format(
            #         bminfo['address'], bminfo['port']))
            #     bitmessage_api = xmlrpclib.ServerProxy("http://{}:{}@{}:{}/".format(
            #         bm_user, bm_pass, bminfo['address'], bminfo['port']))
            # else:
            #     self._log.info("Failed to start bitmessage dameon")
            #     self._bitmessage_api = None
        # NOTE(review): returning a value from __init__ raises TypeError
        # at runtime, and `result` is unbound on the failure path --
        # almost certainly another artifact of the garbled source.
        return result

    def _checkok(self, msg):
        """Log that an 'ok' check message arrived; *msg* itself is ignored."""
        self._log.info('Check ok')

    def get_guid(self):
        """Return this node's GUID."""
        return self._guid

    def getDHT(self):
        """Return the DHT instance backing this transport."""
        return self._dht

    def getBitmessageAPI(self):
        """Return the Bitmessage XML-RPC proxy (None if unavailable)."""
        return self._bitmessage_api

    def getMarketID(self):
        """Return the market identifier this transport belongs to."""
        return self._market_id

    def getMyself(self):
        """Return our own ECC key object."""
        return self._myself

    def _ping(self, msg):
        """Answer an incoming ping by sending a hello_response back.

        *msg* carries the sender's 'uri', 'pubkey' and 'senderGUID'.
        """
        self._log.info('Pinged %s ' % msg)

        conn = CryptoPeerConnection(self, msg['uri'], msg['pubkey'], msg['senderGUID'])
        response = {
            "type": "hello_response",
            "senderGUID": self.guid,
            "uri": self._uri,
            "senderNick": self._nickname,
            "pubkey": self.pubkey,
        }
        conn.send_raw(json.dumps(response))


    def _storeValue(self, msg):
        """Delegate a STORE message to the DHT layer."""
        self._dht._on_storeValue(msg)

    def _findNode(self, msg):
        """Delegate a FIND_NODE request to the DHT layer."""
        self._dht.on_find_node(msg)

    def _findNodeResponse(self, msg):
        """Forward a FIND_NODE response to the DHT layer.

        NOTE(review): unlike _findNode, the transport instance itself is
        passed along here -- confirm the DHT API actually expects it.
        """
        self._dht.on_findNodeResponse(self, msg)

    def _setup_settings(self):
        """Load this market's settings document from the DB, creating the
        node identity (keypair, Bitmessage address, PGP key) on first run.

        Populates the nickname, secret, pubkey, guid, sin and bitmessage
        attributes from the stored document.
        """
        self.settings = self._db.settings.find_one({'id': "%s" % self._market_id})

        if self.settings:
            # Existing identity: read each field, defaulting to "" when
            # absent.  (dict.get replaces the deprecated has_key/ternary
            # pattern; behavior is identical.)
            self._nickname = self.settings.get("nickname", "")
            self.secret = self.settings.get("secret", "")
            self.pubkey = self.settings.get("pubkey", "")
            self.guid = self.settings.get("guid", "")
            self.sin = self.settings.get("sin", "")
            self.bitmessage = self.settings.get("bitmessage", "")
        else:
            self._nickname = 'Default'

            # Generate Bitcoin keypair
            self._generate_new_keypair()

            # Generate Bitmessage address (only if an API connection is up)
            if self._bitmessage_api is not None:
                self._generate_new_bitmessage_address()

            # Generate PGP key
            # NOTE(review): hard-coded passphrase kept as-is; consider
            # sourcing it from configuration.
            gpg = gnupg.GPG()
            input_data = gpg.gen_key_input(key_type="RSA", key_length=2048,
                                           name_comment="Autogenerated by Open Bazaar",
                                           passphrase="P@ssw0rd")
            key = gpg.gen_key(input_data)
            pubkey_text = gpg.export_keys(key.fingerprint)

            self._db.settings.update(
                {"id": '%s' % self._market_id},
                {"$set": {"PGPPubKey": pubkey_text,
                          "PGPPubkeyFingerprint": key.fingerprint}},
                True)

            # Re-read so self.settings reflects everything just generated.
            self.settings = self._db.settings.find_one({'id': "%s" % self._market_id})

        self._log.debug('Retrieved Settings: %s', self.settings)


    def _generate_new_keypair(self):
        """Create a fresh secp256k1 keypair and derive the node guid and
        SIN from it, persisting everything to the settings collection.
        """
        # Generate new keypair
        key = ec.ECC(curve='secp256k1')
        self.secret = key.get_privkey().encode('hex')
        pubkey = key.get_pubkey()
        self.pubkey = pubkey.encode('hex')
        self._myself = key
        # (removed: an unused self-signature of the pubkey that was never
        # stored or transmitted)

        # guid = RIPEMD160(SHA256(pubkey))
        sha_hash = hashlib.sha256()
        sha_hash.update(pubkey)
        ripe_hash = hashlib.new('ripemd160')
        ripe_hash.update(sha_hash.digest())
        self.guid = ripe_hash.digest().encode('hex')

        # SIN = Base58Check(0x0F 0x02 || ripemd160 digest)
        # BUGFIX: the prefix used to be the literal '\x0F\x02%s' -- the
        # stray "%s" (a leftover format placeholder) was concatenated
        # into the SIN payload, producing invalid identifiers.
        self.sin = obelisk.EncodeBase58Check('\x0F\x02' + ripe_hash.digest())

        self._db.settings.update(
            {"id": '%s' % self._market_id},
            {"$set": {"secret": self.secret, "pubkey": self.pubkey,
                      "guid": self.guid, "sin": self.sin}},
            True)

    def _generate_new_bitmessage_address(self):
        """Create a new Bitmessage address keyed on our GUID and persist it."""
        self.bitmessage = self._bitmessage_api.createRandomAddress(
            self.guid.encode('base64'), False, 1.05, 1.1111)
        self._db.settings.update(
            {"id": '%s' % self._market_id},
            {"$set": {"bitmessage": self.bitmessage}},
            True)


    def join_network(self, dev_mode=0, callback=lambda msg: None):
        """Connect to the seed peers (or localhost in dev mode) and to any
        peers remembered from previous sessions.

        Once a seed connection is up, a DHT lookup for our own GUID is
        started to populate the routing table; *callback* is invoked with
        each connection result.
        """
        if dev_mode:
            self._log.info('DEV MODE')
            seed_peers = {'127.0.0.1'}
        else:
            seed_peers = ('seed.openbazaar.org',
                          'seed2.openbazaar.org')

        def seed_connected(msg):
            # Search the DHT for ourselves to fill the routing table,
            # then hand the result to the caller.
            self._dht._iterativeFind(self._guid, self._dht._knownNodes, 'findNode')
            callback(msg)

        for seed in seed_peers:
            self._log.info('Initializing Seed Peer(s): [%s]' % seed)
            self.connect('tcp://%s:12345' % seed, callback=seed_connected)

        # Also try peers we already know about; no DHT kick-off for these.
        for known_peer in self._db.peers.find():
            self.connect(known_peer['uri'], callback=callback)

    def connect(self, uri, callback):
        """Open a peer connection to *uri*.

        When the connection's handshake callback fires, the peer is
        registered as a known DHT node and *callback* is invoked with the
        handshake message.  Returns the new CryptoPeerConnection.
        """
        def on_connected(msg):
            parsed = urlparse(uri)
            # `peer` is bound below, before this callback can run.
            self._dht.add_known_node((parsed.hostname, parsed.port, peer._guid))
            callback(msg)

        peer = CryptoPeerConnection(self, uri, callback=on_connected)
        return peer


    def get_crypto_peer(self, guid, uri, pubkey=None, nickname=None):
        """Build a CryptoPeerConnection for *guid*/*uri*.

        Returns None when asked for a connection to ourselves.
        """
        if guid == self.guid:
            self._log.info('Trying to get crypto peer for yourself')
            return

        self._log.info('%s %s %s %s' % (guid, uri, pubkey, nickname))
        return CryptoPeerConnection(self, uri, pubkey, guid=guid, nickname=nickname)

    def addCryptoPeer(self, peer_to_add):
        """Insert *peer_to_add* into the DHT's active-peer list, replacing
        any stale entry that shares its guid, pubkey or address.

        Exact duplicates are ignored; genuinely new peers (other than
        ourselves) are registered with the DHT.
        """
        foundOutdatedPeer = False
        for idx, peer in enumerate(self._dht._activePeers):

            if (peer._address, peer._guid, peer._pub) == \
               (peer_to_add._address, peer_to_add._guid, peer_to_add._pub):
                # Exact duplicate already tracked -- nothing to do.
                self._log.info('Found existing peer, not adding.')
                return

            if peer._guid == peer_to_add._guid or \
               peer._pub == peer_to_add._pub or \
               peer._address == peer_to_add._address:
                foundOutdatedPeer = True
                self._log.info('Found an outdated peer')
                # Update existing peer in place.
                # BUGFIX: this used to assign to self._activePeers, an
                # attribute not set anywhere in this class's visible code;
                # the list being scanned lives on the DHT object.
                self._dht._activePeers[idx] = peer_to_add

        if not foundOutdatedPeer and peer_to_add._guid != self._guid:
            self._log.info('Adding crypto peer at %s' % peer_to_add._nickname)
            self._dht.add_active_peer(self, (peer_to_add._pub, peer_to_add._address,
                                             peer_to_add._guid, peer_to_add._nickname))



    # Return data array with details from the crypto file
    # TODO: This needs to be protected better; potentially encrypted file or DB
    @staticmethod
    def load_crypto_details(store_file):
        """Read nickname, raw secret and raw pubkey from a JSON
        credentials file, validating field presence and hex lengths
        (32-byte secret, 33-byte compressed pubkey).
        """
        with open(store_file) as f:
            data = json.load(f)
        for field in ("nickname", "secret", "pubkey"):
            assert field in data
        assert len(data["secret"]) == 2 * 32
        assert len(data["pubkey"]) == 2 * 33

        return data["nickname"], data["secret"].decode("hex"), \
            data["pubkey"].decode("hex")

    def get_profile(self):
        """Assemble this node's public profile: uri, hex pubkey, nickname
        and the hex pubkeys of all peers we hold keys for.
        """
        self.settings = self._db.settings.find_one({'id': "%s" % self._market_id})

        peers = {}
        for uri, peer in self._peers.iteritems():
            if peer._pub:
                peers[uri] = peer._pub.encode('hex')

        return {'uri': self._uri,
                'pub': self._myself.get_pubkey().encode('hex'),
                'nickname': self._nickname,
                'peers': peers}

    def respond_pubkey_if_mine(self, nickname, ident_pubkey):
        """If *ident_pubkey* is our own key, broadcast a signed pubkey
        response for *nickname*; otherwise log and do nothing.
        """
        if ident_pubkey != self.pubkey:
            self._log.info("Public key does not match your identity")
            return

        # Sign our pubkey's hash with the node secret.
        signing_key = obelisk.EllipticCurveKey()
        signing_key.set_secret(self.secret)
        pubkey = self._myself.pubkey
        signature = signing_key.sign(obelisk.Hash(pubkey))

        # Send array of nickname, pubkey, signature to transport layer
        self.send(proto_response_pubkey(nickname, pubkey, signature))

    def pubkey_exists(self, pub):
        """Return True if *pub* (raw bytes) matches any known peer's key."""
        target_hex = pub.encode('hex')
        for uri, peer in self._peers.iteritems():
            self._log.info('PEER: %s Pub: %s' %
                           (peer._pub.encode('hex'), pub.encode('hex')))
            if peer._pub.encode('hex') == target_hex:
                return True
        return False

    def create_peer(self, uri, pub, node_guid):
        """Register a CryptoPeerConnection for *uri* and fire the 'peer'
        callbacks.

        *pub* is a hex string and is decoded to raw bytes first (falsy
        values are passed through untouched).
        """
        if pub:
            pub = pub.decode('hex')

        peer = CryptoPeerConnection(self, uri, pub, node_guid)
        self._peers[uri] = peer

        # Call 'peer' callbacks on listeners
        self.trigger_callbacks('peer', peer)

    def send(self, data, send_to=None, callback=lambda msg: None):

        self._log.debug("Outgoing Data: %s %s" % (data, send_to))

        # Directed message
        if send_to is not None:

            peer = self._dht._routingTable.getContact(send_to)
            #peer = CryptoPeerConnection(msg['uri'])
            if peer:
                self._log.debug('Directed Data (%s): %s' % (send_to, data))

                try:
                    peer.send(data, callback=callback)
                except:
                    self._log.error('Not sending messing directly to peer')
            else:
                self._log.error('No peer found')

        else:
            # FindKey and then send

            for peer in self._dht._activePeers:
                try:
                    peer = self._dht._routingTable.getContact(peer._guid)
                    data['senderGUID'] = self._guid
                    data['pubkey'] = self.pubkey
                    print data
                    #if peer._pub:
                    #    peer.send(data, callback)
                    #else:


                    def cb(msg):
                        print 'msg %s' % msg

                    peer.send(data, cb)

                except:
                    self._log.info("Error sending over peer!")
                    traceback.print_exc()

    def send_enc(self, uri, msg):
        """Send *msg* to the peer at *uri* -- encrypted when we already
        hold its public key, clear-text JSON otherwise.
        """
        peer = self._peers[uri]
        if peer._pub:
            self._log.info("Sending encrypted [%s] message to %s"
                           % (msg['type'], uri))
            peer.send(msg)
        else:
            # No pubkey yet (initial handshake): send in the clear.
            self._log.info("Sending unencrypted [%s] message to %s"
                           % (msg['type'], uri))
            peer.send_raw(json.dumps(msg))


    def _init_peer(self, msg):
        """Create or refresh peer state from an incoming handshake dict.

        *msg* must carry 'uri' and 'guid'; 'pub', 'nickname' and 'type'
        are optional.  New peers get a hello exchange; known peers may
        have their public key refreshed.
        """
        uri = msg['uri']
        pub = msg.get('pub')
        nickname = msg.get('nickname')
        msg_type = msg.get('type')
        guid = msg['guid']

        if not self.valid_peer_uri(uri):
            self._log.error("Invalid Peer: %s " % uri)
            return

        if uri not in self._peers:
            # First contact with this peer.
            self._log.info('Add New Peer: %s' % uri)
            self.create_peer(uri, pub, guid)

            if not msg_type:
                self.send_enc(uri, hello_request(self.get_profile()))
            elif msg_type == 'hello_request':
                self.send_enc(uri, hello_response(self.get_profile()))
            return

        # Known peer: refresh its public key if the handshake carries one.
        peer = self._peers[uri]
        if pub:
            decoded = pub.decode('hex')
            if not peer._pub:
                self._log.info("Setting public key for seed node")
                peer._pub = decoded
                self.trigger_callbacks('peer', peer)

            if peer._pub != decoded:
                self._log.info("Updating public key for node")
                peer._nickname = nickname
                peer._pub = decoded
                self.trigger_callbacks('peer', peer)

        if msg_type == 'hello_request':
            # reply only if necessary
            self.send_enc(uri, hello_response(self.get_profile()))



    def _on_message(self, msg):
        """Dispatch a decoded message dict to the registered callbacks,
        recording the sender as a known/active DHT peer first.
        """
        pubkey = msg.get('pubkey')
        uri = msg.get('uri')
        guid = msg.get('senderGUID')
        nickname = msg.get('senderNick')
        parsed = urlparse(uri)

        self._log.info('on message %s' % nickname)

        self._dht.add_known_node((parsed.hostname, parsed.port, guid, nickname))
        self._dht.add_active_peer(self, (pubkey, uri, guid, nickname))
        self.trigger_callbacks(msg['type'], msg)


    def _on_raw_message(self, serialized):
        """Decode an incoming wire message and hand it to _on_message.

        Three shapes are handled:
        1. clear-text JSON with a 'type' -> dispatched directly;
        2. JSON without a 'type' -> hex 'data'/'sig' envelope that is
           decrypted with our key and signature-verified;
        3. non-JSON -> assumed fully encrypted, decrypted then parsed.

        NOTE(review): `msg` can be left unbound/partial on some error
        paths (e.g. decrypt failure in case 2 leaves the outer envelope
        in `msg`), and the final dispatch then still runs -- confirm
        intended.
        """

        try:
            # Try to de-serialize clear text message
            msg = json.loads(serialized)
            self._log.info("Message Received [%s]" % msg.get('type', 'unknown'))

            if msg.get('type') is None:

                # Typeless envelope: hex-encoded ciphertext + signature.
                data = msg.get('data').decode('hex')
                sig = msg.get('sig').decode('hex')

                try:
                    data = self._myself.decrypt(data)

                    self._log.debug('Signature: %s' % sig.encode('hex'))
                    self._log.debug('Signed Data: %s' % data)

                    guid =  json.loads(data).get('guid')
                    peer = self._dht._routingTable.getContact(guid)

                    # Rebuild the sender's key from the embedded pubkey.
                    ecc = ec.ECC(curve='secp256k1',pubkey=json.loads(data).get('pubkey').decode('hex'))

                    # Check signature
                    if ecc.verify(sig, data):
                        self._log.info('Verified')
                    else:
                        self._log.error('Message signature could not be verified %s' % msg)
                        return

                    msg = json.loads(data)
                    self._log.debug('Message Data %s ' % msg)
                except:
                    # broad catch: any decrypt/parse/verify failure ends here
                    self._log.error('Could not decrypt message properly')

        except ValueError:
            # Not valid JSON -- assume the whole payload is encrypted.
            try:
                # Encrypted?
                try:
                  msg = self._myself.decrypt(serialized)
                  msg = json.loads(msg)

                  self._log.info("Decrypted Message [%s]"
                               % msg.get('type', 'unknown'))
                except:
                  # NOTE(review): `msg` is unbound here if decrypt raised
                  # before assignment -- this log line can itself raise.
                  self._log.error("Could not decrypt message: %s" % msg)
                  return
            except:

                self._log.error('Message probably sent using incorrect pubkey')

                return

        if msg.get('type') is not None:

          # msg_type/msg_uri/msg_guid are currently unused.
          msg_type = msg.get('type')
          msg_uri = msg.get('uri')
          msg_guid = msg.get('guid')

          self._on_message(msg)
        else:
          self._log.error('Received a message with no type')
Esempio n. 53
0
class TailWriter_Worker (threading.Thread):
    """ Simply appends to files """

    log = skytools.getLogger ('h:TailWriter_Worker')

    def __init__ (self, name, xtx, zctx, ioloop, dealer_url, router_url, params = None):
        """Worker thread that receives file chunks over zmq and appends
        them to local files.

        :param name:        thread name (suffix reused for the log channel)
        :param xtx:         context used to (de)serialize CC messages
        :param zctx:        zmq context shared with the master
        :param ioloop:      ioloop driving the maintenance timer
        :param dealer_url:  shared announce channel url (new files)
        :param router_url:  direct channel url (grabbed files)
        :param params:      extra config; each key/value is set as an
                            attribute on the worker (e.g. dstdir,
                            maint_period, write_compressed, ...)
        """
        super(TailWriter_Worker, self).__init__(name=name)

        self.log = skytools.getLogger ('h:TailWriter_Worker' + name[name.rfind('-'):])

        self.xtx = xtx
        self.zctx = zctx
        self.ioloop = ioloop
        self.shared_url = dealer_url
        self.direct_url = router_url

        # BUGFIX: default was a mutable `params = {}` (shared across
        # calls); replaced with None -- behavior unchanged for callers.
        for k, v in (params or {}).items():
            self.log.trace ("setattr: %s -> %r", k, v)
            setattr (self, k, v)

        # (host, st_dev, st_ino, filename) -> open-file state dict
        self.files = {}
        self.looping = True

    def startup (self):
        """Open worker sockets and start the maintenance timer."""
        # announce channel (for new files)
        self.sconn = self.zctx.socket (zmq.XREP)
        self.sconn.connect (self.shared_url)
        # direct channel (for grabbed files)
        self.dconn = self.zctx.socket (zmq.XREQ)
        self.dconn.connect (self.direct_url)
        # polling interface
        self.poller = zmq.Poller()
        self.poller.register (self.sconn, zmq.POLLIN)
        self.poller.register (self.dconn, zmq.POLLIN)
        # schedule regular maintenance
        self.timer_maint = PeriodicCallback (self.do_maint, self.maint_period * 1000, self.ioloop)
        self.timer_maint.start()

    def run (self):
        """Thread main loop: poll and process messages until stop()."""
        self.log.info ("%s running", self.name)
        self.startup()
        while self.looping:
            try:
                self.work()
            except:
                self.log.exception ("worker crashed, dropping msg")
        self.shutdown()

    def work (self):
        """Poll both channels (1 s timeout) and handle one message.

        The direct channel is checked first so grabbed files take
        priority over new-file announcements.
        """
        socks = dict (self.poller.poll (1000))
        if self.dconn in socks and socks[self.dconn] == zmq.POLLIN:
            zmsg = self.dconn.recv_multipart()
        elif self.sconn in socks and socks[self.sconn] == zmq.POLLIN:
            zmsg = self.sconn.recv_multipart()
        else: # timeout
            return
        try:
            cmsg = CCMessage (zmsg)
        except:
            self.log.exception ("invalid CC message")
        else:
            self.handle_msg (cmsg)

    def handle_msg (self, cmsg):
        """ Got message from master, process it.

        Acks immediately, then sanitizes the request, opens (or reuses)
        the destination file, optionally (de)compresses the chunk and
        appends it.
        """

        data = cmsg.get_payload (self.xtx)
        if not data: return

        mode = data['mode']
        host = data['hostname']
        fn = os.path.basename (data['filename'])
        op_mode = data.get('op_mode')
        st_dev = data.get('st_dev')
        st_ino = data.get('st_ino')

        # let master know asap :-)
        self._send_ack (host, st_dev, st_ino, data['filename'], data.get('fpos'))

        # sanitize
        host = host.replace ('/', '_')
        if mode not in ['', 'b']:
            self.log.warning ("unsupported fopen mode (%r), ignoring it", mode)
            mode = 'b'
        if op_mode not in [None, '', 'classic', 'rotated']:
            self.log.warning ("unsupported operation mode (%r), ignoring it", op_mode)
            op_mode = None

        # add file ext if needed
        if self.write_compressed == 'keep':
            if data['comp'] not in [None, '', 'none']:
                fn += comp_ext[data['comp']]
        elif self.write_compressed == 'yes':
            fn += comp_ext[self.compression]

        # Cache open files
        fi = (host, st_dev, st_ino, fn)
        if fi in self.files:
            fd = self.files[fi]
            if mode != fd['mode']:
                self.log.error ("fopen mode mismatch (%s -> %s)", mode, fd['mode'])
                return
            if op_mode != fd['op_mode']:
                self.log.error ("operation mode mismatch (%s -> %s)", op_mode, fd['op_mode'])
                return
        else:
            # decide destination file
            if self.host_subdirs:
                subdir = os.path.join (self.dstdir, host)
                dstfn = os.path.join (subdir, fn)
                if not os.path.isdir (subdir):
                    os.mkdir (subdir)
            else:
                dstfn = os.path.join (self.dstdir, '%s--%s' % (host, fn))
            if op_mode == 'rotated':
                dt = datetime.datetime.today()
                dstfn += dt.strftime (DATETIME_SUFFIX)

            fobj = open (dstfn, 'a' + mode)
            self.log.info ('opened %s', dstfn)

            now = time.time()
            fd = { 'obj': fobj, 'mode': mode, 'path': dstfn,
                   'wtime': now, 'ftime': now, 'buf': [], 'bufsize': 0,
                   'offset': 0, 'op_mode': op_mode }
            self.files[fi] = fd

        raw = cmsg.get_part3() # blob
        if not raw:
            raw = data['data'].decode('base64')

        if self.write_compressed in [None, '', 'no']:
            if data['comp'] not in (None, '', 'none'):
                body = cc.util.decompress (raw, data['comp'])
                self.log.debug ("decompressed from %i to %i", len(raw), len(body))
            else:
                body = raw
        elif self.write_compressed == 'keep':
            body = raw
        elif self.write_compressed == 'yes':
            if (data['comp'] != self.compression):
                # buffer decompressed chunks until worth recompressing
                deco = cc.util.decompress (raw, data['comp'])
                fd['buf'].append(deco)
                fd['bufsize'] += len(deco)
                if fd['bufsize'] < self.buf_maxbytes:
                    return
                body = self._process_buffer(fd)
            else:
                body = raw

        # Position check only makes sense when bytes are written exactly
        # as the source counted them (no [de]compression applied).
        # BUGFIX: this used to be hasattr(data, 'fpos'), which is always
        # False on a plain dict payload, so the sync check never ran.
        if 'fpos' in data and (self.write_compressed in [None, '', 'no']
                or (self.write_compressed == 'keep' and data['comp'] in [None, '', 'none'])):
            fpos = fd['obj'].tell()
            if data['fpos'] != fpos + fd['offset']:
                self.log.warning ("sync lost: %i -> %i", fpos, data['fpos'])
                fd['offset'] = data['fpos'] - fpos

        # append to file
        self.log.debug ('appending %i bytes to %s', len(body), fd['path'])
        fd['obj'].write (body)
        fd['wtime'] = time.time()

        self.stat_inc ('appended_bytes', len(body))

    def _process_buffer (self, fd):
        """ Compress and reset write buffer """
        buf = ''.join(fd['buf'])
        out = cc.util.compress (buf, self.compression, {'level': self.compression_level})
        self.log.debug ("compressed from %i to %i", fd['bufsize'], len(out))
        fd['buf'] = []
        fd['bufsize'] = 0
        return out

    def _send_ack (self, hostname, st_dev, st_ino, filename, fpos):
        """ Send ack to master """
        rep = ReplyMessage(
                worker = self.name,
                d_hostname = hostname,
                d_st_dev = st_dev,
                d_st_ino = st_ino,
                d_filename = filename,
                d_fpos = fpos)
        rcm = self.xtx.create_cmsg (rep)
        rcm.send_to (self.dconn)

    def do_maint (self):
        """ Close long-open files; flush inactive files. """
        self.log.trace ('cleanup')
        now = time.time()
        zombies = []
        for k, fd in self.files.iteritems():
            if now - fd['wtime'] > CLOSE_DELAY:
                # idle too long: flush any buffered data, then close
                if fd['buf']:
                    body = self._process_buffer(fd)
                    fd['obj'].write(body)
                fd['obj'].close()
                self.log.info ('closed %s', fd['path'])
                zombies.append(k)
            elif (fd['wtime'] > fd['ftime']) and (now - fd['wtime'] > FLUSH_DELAY):
                # note: think about small writes within flush period
                fd['obj'].flush()
                self.log.debug ('flushed %s', fd['path'])
                fd['ftime'] = now
        for k in zombies:
                self.files.pop(k)

    def stop (self):
        """ Ask the worker loop to exit after the current iteration. """
        self.looping = False

    def shutdown (self):
        """ Close all open files """
        self.log.info ('%s stopping', self.name)
        for fd in self.files.itervalues():
            if fd['buf']:
                body = self._process_buffer(fd)
                fd['obj'].write(body)
            fd['obj'].close()
            self.log.info ('closed %s', fd['path'])
Esempio n. 54
0
class Echo(CCHandler):
    """ Echo handler / sender / monitor """

    CC_ROLES = ['local', 'remote']

    log = skytools.getLogger('h:Echo')

    ping_tick = 1
    zmq_hwm = 1
    zmq_linger = 0

    def __init__(self, hname, hcf, ccscript):
        """Open a ping stream to every configured remote and start the
        periodic ping timer.
        """
        super(Echo, self).__init__(hname, hcf, ccscript)

        self.echoes = {}  # per-peer EchoState, keyed by url
        self.stream = {}  # per-peer CCStream, keyed by url

        for url in self.cf.getlist("ping-remotes", ""):
            peer_sock = self._make_socket(url)
            stream = CCStream(peer_sock, ccscript.ioloop, qmaxsize=self.zmq_hwm)
            stream.on_recv(self.on_recv)
            self.stream[url] = stream
            self.echoes[url] = EchoState(url)
            self.log.debug("will ping %s", url)

        self.timer = PeriodicCallback(self.ping, self.ping_tick * 1000,
                                      self.ioloop)
        self.timer.start()

    def _make_socket(self, url):
        """ Create socket for pinging remote CC. """
        s = self.zctx.socket(zmq.XREQ)
        try:
            # older pyzmq exposes a single HWM option...
            s.setsockopt(zmq.HWM, self.zmq_hwm)
        except AttributeError:
            # ...newer pyzmq dropped zmq.HWM
            s.set_hwm(self.zmq_hwm)
        s.setsockopt(zmq.LINGER, self.zmq_linger)
        s.connect(url)
        return s

    def on_recv(self, zmsg):
        """ Got reply from a remote CC, process it. """
        try:
            self.log.trace("%r", zmsg)
            cmsg = CCMessage(zmsg)
            dest = cmsg.get_dest()
            if dest != "echo.response":
                self.log.warn("unknown msg: %s", dest)
                return
            self.process_response(cmsg)
        except:
            self.log.exception("crashed, dropping msg")

    def handle_msg(self, cmsg):
        """ Got a message, process it. """

        self.log.trace("%r", cmsg)
        dest = cmsg.get_dest()
        if dest != "echo.request":
            self.log.warn("unknown msg: %s", dest)
            return
        self.process_request(cmsg)

    def process_request(self, cmsg):
        """ Ping received, respond with pong. """

        msg = cmsg.get_payload(self.xtx)
        if not msg:
            return

        pong = EchoResponseMessage(orig_hostname=msg['hostname'],
                                   orig_target=msg['target'],
                                   orig_time=msg['time'])
        reply_cmsg = self.xtx.create_cmsg(pong)
        reply_cmsg.take_route(cmsg)
        reply_cmsg.send_to(self.cclocal)

    def process_response(self, cmsg):
        """ Pong received, evaluate it. """

        msg = cmsg.get_payload(self.xtx)
        if not msg:
            return

        url = msg.orig_target
        echo = self.echoes.get(url)
        if echo is None:
            self.log.warn("unknown pong: %s", url)
            return
        echo.update_pong(msg)

        # round-trip time, judged against the ping period
        rtt = echo.time_pong - msg.orig_time
        if msg.orig_time == echo.time_ping:
            self.log.trace("echo time: %f s (%s)", rtt, url)
        elif rtt <= 5 * self.ping_tick:
            self.log.debug("late pong: %f s (%s)", rtt, url)
        else:
            self.log.info("too late pong: %f s (%s)", rtt, url)

    def send_request(self, url):
        """ Send ping to remote CC. """
        msg = EchoRequestMessage(target=url)
        cmsg = self.xtx.create_cmsg(msg)
        self.stream[url].send_cmsg(cmsg)
        self.echoes[url].update_ping(msg)
        self.log.trace("%r", msg)

    def ping(self):
        """ Echo requesting and monitoring. """
        self.log.trace("")
        for url in self.stream:
            echo = self.echoes[url]
            silence = echo.time_ping - echo.time_pong
            if silence > 5 * self.ping_tick:
                self.log.warn("no pong from %s for %f s", url, silence)
            self.send_request(url)

    def stop(self):
        """ Stop the ping timer before shutting the handler down. """
        super(Echo, self).stop()
        self.log.info("stopping")
        self.timer.stop()
Esempio n. 55
0
class TailWriter (CCHandler):
    """ Simply appends to files (with help from workers).

    Incoming client messages are dispatched to a pool of worker threads
    over two inproc ZMQ sockets: a "dealer" socket offers new files to
    any free worker, and a "router" socket addresses follow-up messages
    to the worker that claimed the file.  A periodic maintenance timer
    flushes queued messages and forgets inactive files.
    """

    CC_ROLES = ['remote']

    log = skytools.getLogger ('h:TailWriter')

    def __init__ (self, hname, hcf, ccscript):
        """ Read config, set up worker sockets, launch workers and
        start the maintenance timer.
        """
        super(TailWriter, self).__init__(hname, hcf, ccscript)

        # (hostname, st_dev, st_ino, filename) -> FileState
        self.files = {}
        self.workers = []
        self.wparams = {} # passed to workers

        self.wparams['dstdir'] = self.cf.getfile ('dstdir')
        self.wparams['host_subdirs'] = self.cf.getbool ('host-subdirs', 0)
        self.wparams['maint_period'] = self.cf.getint ('maint-period', 3)
        self.wparams['write_compressed'] = self.cf.get ('write-compressed', '')
        assert self.wparams['write_compressed'] in [None, '', 'no', 'keep', 'yes']
        if self.wparams['write_compressed'] in ('keep', 'yes'):
            self.log.info ("position checking not supported for compressed files")
        if self.wparams['write_compressed'] == 'yes':
            self.wparams['compression'] = self.cf.get ('compression', '')
            if self.wparams['compression'] not in ('gzip', 'bzip2'):
                self.log.error ("unsupported compression: %s", self.wparams['compression'])
            self.wparams['compression_level'] = self.cf.getint ('compression-level', '')
            self.wparams['buf_maxbytes'] = cc.util.hsize_to_bytes (self.cf.get ('buffer-bytes', '1 MB'))
            if self.wparams['buf_maxbytes'] < BUF_MINBYTES:
                self.log.info ("buffer-bytes too low, adjusting: %i -> %i", self.wparams['buf_maxbytes'], BUF_MINBYTES)
                self.wparams['buf_maxbytes'] = BUF_MINBYTES

        # initialise sockets for communication with workers
        self.dealer_stream, self.dealer_url = self.init_comm (zmq.XREQ, 'inproc://workers-dealer', self.dealer_on_recv)
        self.router_stream, self.router_url = self.init_comm (zmq.XREP, 'inproc://workers-router', self.router_on_recv)

        self.launch_workers()

        self.timer_maint = PeriodicCallback (self.do_maint, self.wparams['maint_period'] * 1000, self.ioloop)
        self.timer_maint.start()

    def init_comm (self, stype, url, cb):
        """ Create socket, stream, etc for communication with workers.

        Binds to a random port under *url* and registers *cb* as the
        receive callback; returns (stream, concrete_url).
        """
        sock = self.zctx.socket (stype)
        port = sock.bind_to_random_port (url)
        curl = "%s:%d" % (url, port)
        stream = CCStream (sock, self.ioloop)
        stream.on_recv (cb)
        return (stream, curl)

    def launch_workers (self):
        """ Create and start worker threads. """
        nw = self.cf.getint ('worker-threads', 10)
        for i in range (nw):
            wname = "%s.worker-%i" % (self.hname, i)
            self.log.info ("starting %s", wname)
            w = TailWriter_Worker(
                    wname, self.xtx, self.zctx, self.ioloop,
                    self.dealer_url, self.router_url, self.wparams)
            w.stat_inc = self.stat_inc # XXX
            self.workers.append (w)
            w.start()

    def handle_msg (self, cmsg):
        """ Got message from client, process it.

        Known files go (directly or queued) to the worker that owns
        them; unknown files are offered to any worker via the dealer.
        """

        data = cmsg.get_payload (self.xtx)
        if not data: return

        host = data['hostname']
        fn = data['filename']
        st_dev = data.get('st_dev')
        st_ino = data.get('st_ino')

        # device+inode in the key distinguishes rotated files with the same name
        fi = (host, st_dev, st_ino, fn)
        if fi in self.files:
            fd = self.files[fi]
            if fd.waddr: # already accepted ?
                self.log.trace ("passing %r to %s", fn, fd.wname)
                fd.queue.append (cmsg)
                fd.send_to (self.router_stream)
            else:
                # offered but not yet claimed by a worker -- hold the message
                self.log.trace ("queueing %r", fn)
                fd.queue.append (cmsg)
        else:
            fd = FileState (fi, 1)
            self.files[fi] = fd
            self.log.trace ("offering %r", fn)
            self.dealer_stream.send_cmsg (cmsg)

    def dealer_on_recv (self, zmsg):
        """ Got reply from worker via "dealer" connection """
        # unexpected: workers are supposed to reply via the router socket
        self.log.warning ("reply via dealer: %s", zmsg)

    def router_on_recv (self, zmsg):
        """ Got reply from worker via "router" connection.

        First reply for a file records which worker claimed it; later
        replies must come from the same worker.  Decrements the count
        of outstanding messages for the file.
        """
        cmsg = CCMessage (zmsg)
        data = cmsg.get_payload (self.xtx)
        fi = (data['d_hostname'], data['d_st_dev'], data['d_st_ino'], data['d_filename'])
        fd = self.files[fi]
        if fd.waddr is None:
            # zmsg[0] is the worker's ZMQ routing identity
            fd.waddr = zmsg[0]
            fd.wname = data['worker']
        else:
            assert fd.waddr == zmsg[0] and fd.wname == data['worker']
        fd.atime = time.time()
        fd.count -= 1
        assert fd.count >= 0

    def do_maint (self):
        """ Check & flush queues; drop inactive files. """
        self.log.trace ('cleanup')
        now = time.time()
        zombies = []
        for k, fd in self.files.iteritems():
            if fd.queue and fd.waddr:
                self.log.trace ("passing %r to %s", fd.ident, fd.wname)
                fd.send_to (self.router_stream)
            if (fd.count == 0) and (now - fd.atime > 2 * CLOSE_DELAY): # you'd better use msg for this
                self.log.debug ("forgetting %r", fd.ident)
                zombies.append(k)
        # pop after iteration -- cannot modify the dict while iterating it
        for k in zombies:
                self.files.pop(k)

    def stop (self):
        """ Signal workers to shut down. """
        super(TailWriter, self).stop()
        self.log.info ('stopping')
        self.timer_maint.stop()
        for w in self.workers:
            self.log.info ("signalling %s", w.name)
            w.stop()
Esempio n. 56
0
class CryptoTransportLayer(TransportLayer):
    """ TransportLayer with a crypto identity bolted on.

    Manages the node's keys (ECC/Bitcoin keypair, GUID, PGP key,
    optional Bitmessage address), persists them in the ``settings``
    table of *db*, wires itself into a DHT for peer discovery, and
    handles encrypted/signed message exchange with peers.
    """

    def __init__(self, my_ip, my_port, market_id, db, bm_user=None, bm_pass=None,
                 bm_port=None, seed_mode=0, dev_mode=False, disable_ip_update=False):
        """ Set up logging, optional Bitmessage, identity settings, DHT,
        callbacks, and (unless seeded / dev / disabled) a periodic
        public-IP checker.
        """
        self.log = logging.getLogger(
            '[%s] %s' % (market_id, self.__class__.__name__)
        )
        # quiet the 'requests' library used by the IP checker
        requests_log = logging.getLogger("requests")
        requests_log.setLevel(logging.WARNING)

        # Connect to database
        self.db = db

        self.bitmessage_api = None
        # only try Bitmessage if at least one credential was supplied
        if (bm_user, bm_pass, bm_port) != (None, None, None):
            if not self._connect_to_bitmessage(bm_user, bm_pass, bm_port):
                self.log.info('Bitmessage not installed or started')

        # IPv6 addresses must be bracketed in the ZMQ URI
        try:
            socket.inet_pton(socket.AF_INET6, my_ip)
            my_uri = "tcp://[%s]:%s" % (my_ip, my_port)
        except (socket.error, ValueError):
            my_uri = "tcp://%s:%s" % (my_ip, my_port)

        self.market_id = market_id
        self.nick_mapping = {}
        self.uri = my_uri
        self.ip = my_ip
        self.nickname = ""
        self._dev_mode = dev_mode

        # Set up (loads/creates keys, guid, nickname from the db)
        self._setup_settings()

        self.dht = DHT(self, self.market_id, self.settings, self.db)

        # self._myself = ec.ECC(pubkey=self.pubkey.decode('hex'),
        #                       privkey=self.secret.decode('hex'),
        #                       curve='secp256k1')

        TransportLayer.__init__(self, market_id, my_ip, my_port,
                                self.guid, self.nickname)

        self.setup_callbacks()
        self.listen(self.pubkey)

        if seed_mode == 0 and not dev_mode and not disable_ip_update:
            self.start_ip_address_checker()

    def setup_callbacks(self):
        """ Register message-type handlers with the base transport. """
        self.add_callbacks([('hello', self._ping),
                            ('findNode', self._find_node),
                            ('findNodeResponse', self._find_node_response),
                            ('store', self._store_value)])

    def start_ip_address_checker(self):
        '''Checks for possible public IP change'''
        # poll an external service every 5 seconds
        self.caller = PeriodicCallback(self._ip_updater_periodic_callback, 5000, ioloop.IOLoop.instance())
        self.caller.start()

    def _ip_updater_periodic_callback(self):
        """ Query icanhazip for the public IP; on change, rebind the
        listening socket under the new URI and re-seek our node in the DHT.
        """
        try:
            r = requests.get('http://ipv4.icanhazip.com')

            if r and hasattr(r, 'text'):
                ip = r.text
                ip = ip.strip(' \t\n\r')
                if ip != self.ip:
                    self.ip = ip
                    try:
                        socket.inet_pton(socket.AF_INET6, self.ip)
                        my_uri = 'tcp://[%s]:%s' % (self.ip, self.port)
                    except (socket.error, ValueError):
                        my_uri = 'tcp://%s:%s' % (self.ip, self.port)
                    self.uri = my_uri
                    # rebind: close old listener, reopen on the new address
                    self.stream.close()
                    self.listen(self.pubkey)

                    self.dht._iterativeFind(self.guid, [], 'findNode')
            else:
                self.log.error('Could not get IP')
        except Exception as e:
            self.log.error('[Requests] error: %s' % e)

    def save_peer_to_db(self, peer_tuple):
        """ Replace any stored record for this peer (by uri OR guid)
        with a fresh one; peers without a guid are not persisted.
        """
        uri = peer_tuple[0]
        pubkey = peer_tuple[1]
        guid = peer_tuple[2]
        nickname = peer_tuple[3]

        # Update query
        self.db.deleteEntries("peers", {"uri": uri, "guid": guid}, "OR")
        # if len(results) > 0:
        #     self.db.updateEntries("peers", {"id": results[0]['id']}, {"market_id": self.market_id, "uri": uri, "pubkey": pubkey, "guid": guid, "nickname": nickname})
        # else:
        if guid is not None:
            self.db.insertEntry("peers", {
                "uri": uri,
                "pubkey": pubkey,
                "guid": guid,
                "nickname": nickname,
                "market_id": self.market_id
            })

    def _connect_to_bitmessage(self, bm_user, bm_pass, bm_port):
        """ Try to reach a local Bitmessage XML-RPC API; returns a truthy
        value (the result of a trivial API call) on success, False otherwise.
        """
        # Get bitmessage going
        # First, try to find a local instance
        result = False
        try:
            self.log.info('[_connect_to_bitmessage] Connecting to Bitmessage on port %s' % bm_port)
            self.bitmessage_api = xmlrpclib.ServerProxy("http://{}:{}@localhost:{}/".format(bm_user, bm_pass, bm_port), verbose=0)
            # add(2, 3) doubles as a liveness probe for the API
            result = self.bitmessage_api.add(2, 3)
            self.log.info("[_connect_to_bitmessage] Bitmessage API is live: %s", result)
        # If we failed, fall back to starting our own
        except Exception as e:
            self.log.info("Failed to connect to bitmessage instance: {}".format(e))
            self.bitmessage_api = None
            # self._log.info("Spawning internal bitmessage instance")
            # # Add bitmessage submodule path
            # sys.path.insert(0, os.path.join(
            #     os.path.dirname(__file__), '..', 'pybitmessage', 'src'))
            # import bitmessagemain as bitmessage
            # bitmessage.logger.setLevel(logging.WARNING)
            # bitmessage_instance = bitmessage.Main()
            # bitmessage_instance.start(daemon=True)
            # bminfo = bitmessage_instance.getApiAddress()
            # if bminfo is not None:
            #     self._log.info("Started bitmessage daemon at %s:%s".format(
            #         bminfo['address'], bminfo['port']))
            #     bitmessage_api = xmlrpclib.ServerProxy("http://{}:{}@{}:{}/".format(
            #         bm_user, bm_pass, bminfo['address'], bminfo['port']))
            # else:
            #     self._log.info("Failed to start bitmessage dameon")
            #     self._bitmessage_api = None
        return result

    def _checkok(self, msg):
        """ Trivial ack handler. """
        self.log.info('Check ok')

    def get_guid(self):
        """ Return this node's GUID. """
        return self.guid

    def get_dht(self):
        """ Return the DHT instance. """
        return self.dht

    def get_bitmessage_api(self):
        """ Return the Bitmessage API proxy (None if unavailable). """
        return self.bitmessage_api

    def get_market_id(self):
        """ Return the market id this transport serves. """
        return self.market_id

    # def get_myself(self):
    #     return self._myself

    def _ping(self, msg):
        """ Handler for 'hello' messages; currently just logs them. """

        self.log.info('Pinged %s ' % json.dumps(msg, ensure_ascii=False))
        #
        # pinger = CryptoPeerConnection(self, msg['uri'], msg['pubkey'], msg['senderGUID'])
        # pinger.send_raw(json.dumps(
        #     {"type": "hello_response",
        #      "senderGUID": self.guid,
        #      "uri": self.uri,
        #      "senderNick": self.nickname,
        #      "pubkey": self.pubkey,
        #     }))

    def _store_value(self, msg):
        """ Handler for 'store' messages: delegate to the DHT. """
        self.dht._on_storeValue(msg)

    def _find_node(self, msg):
        """ Handler for 'findNode' messages: delegate to the DHT. """
        self.dht.on_find_node(msg)

    def _find_node_response(self, msg):
        """ Handler for 'findNodeResponse' messages: delegate to the DHT. """
        self.dht.on_findNodeResponse(self, msg)

    def _setup_settings(self):
        """ Load this market's settings row (creating it if absent) and
        generate any missing identity material: PGP keypair, Bitcoin
        keypair/GUID/SIN, nickname, Bitmessage address.  Populates the
        identity attributes and self._myself (the ECC key object).
        """

        try:
            self.settings = self.db.selectEntries("settings", {"market_id": self.market_id})
        except (OperationalError, DatabaseError) as e:
            print e
            raise SystemExit("database file %s corrupt or empty - cannot continue" % self.db.db_path)

        if len(self.settings) == 0:
            # first run for this market: create a minimal settings row
            self.settings = {"market_id": self.market_id, "welcome": "enable"}
            self.db.insertEntry("settings", self.settings)
        else:
            self.settings = self.settings[0]

        # Generate PGP key during initial setup or if previous PGP gen failed
        if not ('PGPPubKey' in self.settings and self.settings["PGPPubKey"]):
            try:
                self.log.info('Generating PGP keypair. This may take several minutes...')
                print 'Generating PGP keypair. This may take several minutes...'
                gpg = gnupg.GPG()
                input_data = gpg.gen_key_input(key_type="RSA",
                                               key_length=2048,
                                               name_email='*****@*****.**',
                                               name_comment="Autogenerated by Open Bazaar",
                                               passphrase="P@ssw0rd")
                assert input_data is not None
                key = gpg.gen_key(input_data)
                assert key is not None

                pubkey_text = gpg.export_keys(key.fingerprint)
                newsettings = {"PGPPubKey": pubkey_text, "PGPPubkeyFingerprint": key.fingerprint}
                self.db.updateEntries("settings", {"market_id": self.market_id}, newsettings)
                self.settings.update(newsettings)

                self.log.info('PGP keypair generated.')
            except Exception as e:
                self.log.error("Encountered a problem with GPG: %s" % e)
                raise SystemExit("Encountered a problem with GPG: %s" % e)

        if not ('pubkey' in self.settings and self.settings['pubkey']):
            # Generate Bitcoin keypair
            self._generate_new_keypair()

        if not ('nickname' in self.settings and self.settings['nickname']):
            newsettings = {'nickname': 'Default'}
            self.db.updateEntries('settings', {"market_id": self.market_id}, newsettings)
            self.settings.update(newsettings)

        self.nickname = self.settings['nickname'] if 'nickname' in self.settings else ""
        self.secret = self.settings['secret'] if 'secret' in self.settings else ""
        self.pubkey = self.settings['pubkey'] if 'pubkey' in self.settings else ""
        self.privkey = self.settings.get('privkey')
        self.btc_pubkey = privkey_to_pubkey(self.privkey)
        self.guid = self.settings['guid'] if 'guid' in self.settings else ""
        self.sin = self.settings['sin'] if 'sin' in self.settings else ""
        self.bitmessage = self.settings['bitmessage'] if 'bitmessage' in self.settings else ""

        if not ('bitmessage' in self.settings and self.settings['bitmessage']):
            # Generate Bitmessage address
            if self.bitmessage_api is not None:
                self._generate_new_bitmessage_address()

        self._myself = ec.ECC(
            pubkey=pubkey_to_pyelliptic(self.pubkey).decode('hex'),
            raw_privkey=self.secret.decode('hex'),
            curve='secp256k1'
        )

        self.log.debug('Retrieved Settings: \n%s', pformat(self.settings))

    def _generate_new_keypair(self):
        """ Create a fresh Bitcoin keypair plus GUID and SIN derived from
        the public key, and persist everything to the settings table.
        """
        secret = str(random.randrange(2 ** 256))
        self.secret = hashlib.sha256(secret).hexdigest()
        self.pubkey = privtopub(self.secret)
        self.privkey = random_key()
        print 'PRIVATE KEY: ', self.privkey
        self.btc_pubkey = privtopub(self.privkey)
        print 'PUBLIC KEY: ', self.btc_pubkey

        # Generate SIN: RIPEMD160(SHA256(pubkey))
        sha_hash = hashlib.sha256()
        sha_hash.update(self.pubkey)
        ripe_hash = hashlib.new('ripemd160')
        ripe_hash.update(sha_hash.digest())

        self.guid = ripe_hash.digest().encode('hex')
        # NOTE(review): '\x0F\x02%s' + digest concatenates a literal '%s'
        # into the payload instead of substituting -- looks like a bug
        # (probably meant '\x0F\x02' + digest or a % substitution); confirm.
        self.sin = obelisk.EncodeBase58Check('\x0F\x02%s' + ripe_hash.digest())

        newsettings = {
            "secret": self.secret,
            "pubkey": self.pubkey,
            "privkey": self.privkey,
            "guid": self.guid,
            "sin": self.sin
        }
        self.db.updateEntries("settings", {"market_id": self.market_id}, newsettings)
        self.settings.update(newsettings)

    def _generate_new_bitmessage_address(self):
        """ Create a Bitmessage address via the API and persist it. """
        # Use the guid generated previously as the key
        self.bitmessage = self.bitmessage_api.createRandomAddress(
            self.guid.encode('base64'),
            False,
            1.05,
            1.1111
        )
        newsettings = {"bitmessage": self.bitmessage}
        self.db.updateEntries("settings", {"market_id": self.market_id}, newsettings)
        self.settings.update(newsettings)

    def join_network(self, seed_peers=None, callback=lambda msg: None):
        """ Connect to seed peers plus previously-seen peers from the db,
        then invoke *callback* (if not None) with 'Joined'.
        """
        if seed_peers is None:
            seed_peers = []

        self.log.info('Joining network')

        known_peers = []

        # Connect up through seed servers
        for idx, seed in enumerate(seed_peers):
            # bare hostnames/IPs -> full ZMQ URIs (brackets for IPv6)
            try:
                socket.inet_pton(socket.AF_INET6, seed)
                seed_peers[idx] = "tcp://[%s]:12345" % seed
            except (socket.error, ValueError):
                seed_peers[idx] = "tcp://%s:12345" % seed

        # Connect to persisted peers
        db_peers = self.get_past_peers()

        known_peers = list(set(seed_peers)) + list(set(db_peers))

        self.connect_to_peers(known_peers)

        # TODO: This needs rethinking. Normally we can search for ourselves
        #       but because we are not connected to them quick enough this
        #       will always fail. Need @gubatron to review
        # Populate routing table by searching for self
        # if len(known_peers) > 0:
        #     self.search_for_my_node()

        if callback is not None:
            callback('Joined')

    def get_past_peers(self):
        """ Return the URIs of peers persisted for this market. """
        peers = []
        result = self.db.selectEntries("peers", {"market_id": self.market_id})
        for peer in result:
            peers.append(peer['uri'])
        return peers

    def search_for_my_node(self):
        """ Populate the routing table by doing a DHT lookup of our own GUID. """
        print 'Searching for myself'
        self.dht._iterativeFind(self.guid, self.dht.knownNodes, 'findNode')

    def connect_to_peers(self, known_peers):
        """ Add each known peer to the DHT, one background thread per peer. """
        for known_peer in known_peers:
            t = Thread(target=self.dht.add_peer, args=(self, known_peer,))
            t.start()

    def get_crypto_peer(self, guid=None, uri=None, pubkey=None, nickname=None,
                        callback=None):
        """ Build a CryptoPeerConnection for a remote node; returns None
        when asked for a connection to ourselves.
        """
        if guid == self.guid:
            self.log.error('Cannot get CryptoPeerConnection for your own node')
            return

        self.log.debug('Getting CryptoPeerConnection' +
                       '\nGUID:%s\nURI:%s\nPubkey:%s\nNickname:%s' %
                       (guid, uri, pubkey, nickname))

        return connection.CryptoPeerConnection(self,
                                               uri,
                                               pubkey,
                                               guid=guid,
                                               nickname=nickname)

    def addCryptoPeer(self, peer_to_add):
        """ Add *peer_to_add* to the DHT, replacing any stale entry that
        shares its guid, pubkey or address.  Exact duplicates are ignored.
        """

        foundOutdatedPeer = False
        for idx, peer in enumerate(self.dht.activePeers):

            if (peer.address, peer.guid, peer.pub) == \
               (peer_to_add.address, peer_to_add.guid, peer_to_add.pub):
                self.log.info('Found existing peer, not adding.')
                return

            if peer.guid == peer_to_add.guid or \
               peer.pub == peer_to_add.pub or \
               peer.address == peer_to_add.address:

                foundOutdatedPeer = True
                self.log.info('Found an outdated peer')

                # Update existing peer
                # NOTE(review): 'self.activePeers' is not set anywhere in this
                # class -- presumably this should be self.dht.activePeers[idx];
                # confirm against TransportLayer/DHT.
                self.activePeers[idx] = peer_to_add
                self.dht.add_peer(self,
                                  peer_to_add.address,
                                  peer_to_add.pub,
                                  peer_to_add.guid,
                                  peer_to_add.nickname)

        if not foundOutdatedPeer and peer_to_add.guid != self.guid:
            self.log.info('Adding crypto peer at %s' % peer_to_add.nickname)
            self.dht.add_peer(self,
                              peer_to_add.address,
                              peer_to_add.pub,
                              peer_to_add.guid,
                              peer_to_add.nickname)

    def get_profile(self):
        """ Return this node's public profile: uri, hex pubkey, nickname,
        and hex pubkeys of known peers.  Refreshes settings from the db.
        """
        peers = {}

        self.settings = self.db.selectEntries("settings", {"market_id": self.market_id})[0]
        for uri, peer in self.peers.iteritems():
            if peer.pub:
                peers[uri] = peer.pub.encode('hex')
        return {'uri': self.uri,
                'pub': self._myself.get_pubkey().encode('hex'),
                'nickname': self.nickname,
                'peers': peers}

    def respond_pubkey_if_mine(self, nickname, ident_pubkey):
        """ If *ident_pubkey* is ours, broadcast our pubkey signed with
        our secret so the requester can verify our identity.
        """

        if ident_pubkey != self.pubkey:
            self.log.info("Public key does not match your identity")
            return

        # Return signed pubkey
        pubkey = self._myself.pubkey
        ec_key = obelisk.EllipticCurveKey()
        ec_key.set_secret(self.secret)
        digest = obelisk.Hash(pubkey)
        signature = ec_key.sign(digest)

        # Send array of nickname, pubkey, signature to transport layer
        self.send(proto_response_pubkey(nickname, pubkey, signature))

    def pubkey_exists(self, pub):
        """ Return True if any known peer already uses public key *pub*. """

        for peer in self.peers.itervalues():
            self.log.info(
                'PEER: %s Pub: %s' % (
                    peer.pub.encode('hex'), pub.encode('hex')
                )
            )
            if peer.pub.encode('hex') == pub.encode('hex'):
                return True

        return False

    def create_peer(self, uri, pub, node_guid):
        """ Create (or overwrite) the peer connection for *uri* and fire
        'peer' callbacks.  *pub* is hex-encoded when given.
        """

        if pub:
            pub = pub.decode('hex')

        # Create the peer if public key is not already in the peer list
        # if not self.pubkey_exists(pub):
        self.peers[uri] = connection.CryptoPeerConnection(self, uri, pub, node_guid)

        # Call 'peer' callbacks on listeners
        self.trigger_callbacks('peer', self.peers[uri])

        # else:
        #    print 'Pub Key is already in peer list'

    def send(self, data, send_to=None, callback=lambda msg: None):
        """ Send *data* either to a specific peer (by guid in *send_to*)
        or, when *send_to* is None, to every active DHT peer.
        """

        self.log.debug("Outgoing Data: %s %s" % (data, send_to))

        # Directed message
        if send_to is not None:

            peer = self.dht.routingTable.getContact(send_to)
            if not peer:
                # fall back to a linear scan of active peers
                for activePeer in self.dht.activePeers:
                    if activePeer.guid == send_to:
                        peer = activePeer
                        break

            # peer = CryptoPeerConnection(msg['uri'])
            if peer:
                self.log.debug('Directed Data (%s): %s' % (send_to, data))
                try:
                    peer.send(data, callback=callback)
                except Exception as e:
                    self.log.error('Not sending message directly to peer %s' % e)
            else:
                self.log.error('No peer found')

        else:
            # FindKey and then send

            for peer in self.dht.activePeers:
                try:
                    peer = self.dht.routingTable.getContact(peer.guid)
                    # stamp the message with our identity before broadcasting
                    data['senderGUID'] = self.guid
                    data['pubkey'] = self.pubkey

                    def cb(msg):
                        self.log.debug('Message Back: \n%s' % pformat(msg))

                    peer.send(data, cb)

                except:
                    self.log.info("Error sending over peer!")
                    traceback.print_exc()

    def send_enc(self, uri, msg):
        """ Send *msg* to the peer at *uri*, encrypted when we have the
        peer's public key, otherwise as clear JSON.
        """
        peer = self.peers[uri]
        pub = peer.pub

        # Now send a hello message to the peer
        if pub:
            self.log.info(
                "Sending encrypted [%s] message to %s" % (
                    msg['type'], uri
                )
            )
            peer.send(msg)
        else:
            # Will send clear profile on initial if no pub
            self.log.info(
                "Sending unencrypted [%s] message to %s" % (
                    msg['type'], uri
                )
            )
            self.peers[uri].send_raw(json.dumps(msg))

    def _init_peer(self, msg):
        """ Register or refresh the peer described by *msg* (uri, guid,
        optional pub/nickname) and run the hello handshake as needed.
        """

        uri = msg['uri']
        pub = msg.get('pub')
        nickname = msg.get('nickname')
        msg_type = msg.get('type')
        guid = msg['guid']

        if not self.valid_peer_uri(uri):
            self.log.error("Invalid Peer: %s " % uri)
            return

        if uri not in self.peers:
            # Unknown peer
            self.log.info('Add New Peer: %s' % uri)
            self.create_peer(uri, pub, guid)

            if not msg_type:
                self.send_enc(uri, hello_request(self.get_profile()))
            elif msg_type == 'hello_request':
                self.send_enc(uri, hello_response(self.get_profile()))

        else:
            # Known peer
            if pub:
                # test if we have to update the pubkey
                if not self.peers[uri].pub:
                    self.log.info("Setting public key for seed node")
                    self.peers[uri].pub = pub.decode('hex')
                    self.trigger_callbacks('peer', self.peers[uri])

                if self.peers[uri].pub != pub.decode('hex'):
                    self.log.info("Updating public key for node")
                    self.peers[uri].nickname = nickname
                    self.peers[uri].pub = pub.decode('hex')

                    self.trigger_callbacks('peer', self.peers[uri])

            if msg_type == 'hello_request':
                # reply only if necessary
                self.send_enc(uri, hello_response(self.get_profile()))

    def _on_message(self, msg):
        """ Dispatch a decoded application message: record the sender in
        the DHT and fire the registered callbacks for msg['type'].
        """

        # here goes the application callbacks
        # we get a "clean" msg which is a dict holding whatever
        # self.log.info("[On Message] Data received: %s" % msg)

        pubkey = msg.get('pubkey')
        uri = msg.get('uri')
        ip = urlparse(uri).hostname
        port = urlparse(uri).port
        guid = msg.get('senderGUID')
        # NOTE(review): raises TypeError if 'senderNick' is absent (None[:120])
        nickname = msg.get('senderNick')[:120]

        self.dht.add_known_node((ip, port, guid, nickname))
        self.log.info('On Message: %s' % json.dumps(msg, ensure_ascii=False))
        self.dht.add_peer(self, uri, pubkey, guid, nickname)
        # callbacks run in a separate thread so handlers can't block the loop
        t = Thread(target=self.trigger_callbacks, args=(msg['type'], msg,))
        t.start()

    def _on_raw_message(self, serialized):
        """ Decode an incoming wire message.

        Tries, in order: plain JSON (possibly wrapping an encrypted,
        signed payload in 'data'/'sig'), then ECC-decrypt of the whole
        blob.  Valid typed messages are passed to _on_message.
        """
        try:

            msg = json.loads(serialized)
            self.log.info("Message Received [%s]" % msg.get('type', 'unknown'))

            if msg.get('type') is None:

                # untyped envelope: hex-encoded encrypted payload + signature
                data = msg.get('data').decode('hex')
                sig = msg.get('sig').decode('hex')

                try:
                    cryptor = makePrivCryptor(self.secret)

                    try:
                        data = cryptor.decrypt(data)
                    except Exception as e:
                        self.log.info('Exception: %s' % e)

                    self.log.debug('Signature: %s' % sig.encode('hex'))
                    self.log.debug('Signed Data: %s' % data)

                    # Check signature
                    data_json = json.loads(data)
                    sigCryptor = makePubCryptor(data_json['pubkey'])
                    if sigCryptor.verify(sig, data):
                        self.log.info('Verified')
                    else:
                        # NOTE(review): verification failure is logged but the
                        # message is still processed (return is commented out)
                        self.log.error('Message signature could not be verified %s' % msg)
                        # return

                    msg = json.loads(data)
                    self.log.debug('Message Data %s ' % msg)
                except Exception as e:
                    self.log.error('Could not decrypt message properly %s' % e)

        except ValueError:
            try:
                # Encrypted?
                try:
                    msg = self._myself.decrypt(serialized)
                    msg = json.loads(msg)

                    self.log.info(
                        "Decrypted Message [%s]" % msg.get('type', 'unknown')
                    )
                except:
                    self.log.error("Could not decrypt message: %s" % msg)
                    return
            except:
                self.log.error('Message probably sent using incorrect pubkey')

                return

        if msg.get('type') is not None:
            self._on_message(msg)
        else:
            self.log.error('Received a message with no type')

    def shutdown(self):
        """ Tear down the transport and Bitmessage API; best-effort,
        errors are logged and swallowed so shutdown always completes.
        """
        print "CryptoTransportLayer.shutdown()!"
        try:
            TransportLayer.shutdown(self)
            print "CryptoTransportLayer.shutdown(): ZMQ sockets destroyed."
        except Exception as e:
            self.log.error("Transport shutdown error: " + e.message)

        print "Notice: explicit DHT Shutdown not implemented."

        try:
            self.bitmessage_api.close()
        except Exception as e:
            # might not even be open, not much more we can do on our way out if exception thrown here.
            self.log.error("Could not shutdown bitmessage_api's ServerProxy. " + e.message)
Esempio n. 57
0
class TailWriter(CCHandler):
    """ Simply appends to files (with help from workers) """

    CC_ROLES = ['remote']

    log = skytools.getLogger('h:TailWriter')

    def __init__(self, hname, hcf, ccscript):
        """Read config, open worker sockets, launch workers, start maintenance.

        :param hname:    handler name; used as the prefix for worker thread names
        :param hcf:      handler config (made available as self.cf by the base class)
        :param ccscript: owning CC script; supplies zctx / ioloop / xtx / stat_inc
        """
        super(TailWriter, self).__init__(hname, hcf, ccscript)

        # (hostname, st_dev, st_ino, filename) -> FileState
        self.files = {}
        self.workers = []
        self.wparams = {}  # passed to workers

        self.wparams['dstdir'] = self.cf.getfile('dstdir')
        self.wparams['host_subdirs'] = self.cf.getbool('host-subdirs', 0)
        self.wparams['maint_period'] = self.cf.getint('maint-period', 3)
        self.wparams['write_compressed'] = self.cf.get('write-compressed', '')
        # Only these spellings are accepted for write-compressed.
        assert self.wparams['write_compressed'] in [
            None, '', 'no', 'keep', 'yes'
        ]
        if self.wparams['write_compressed'] in ('keep', 'yes'):
            self.log.info(
                "position checking not supported for compressed files")
        if self.wparams['write_compressed'] == 'yes':
            # Re-compression on write requires an algorithm and buffering params.
            self.wparams['compression'] = self.cf.get('compression', '')
            if self.wparams['compression'] not in ('gzip', 'bzip2'):
                self.log.error("unsupported compression: %s",
                               self.wparams['compression'])
            self.wparams['compression_level'] = self.cf.getint(
                'compression-level', '')
            self.wparams['buf_maxbytes'] = cc.util.hsize_to_bytes(
                self.cf.get('buffer-bytes', '1 MB'))
            # Enforce a floor so compression buffers are not pathologically small.
            if self.wparams['buf_maxbytes'] < BUF_MINBYTES:
                self.log.info("buffer-bytes too low, adjusting: %i -> %i",
                              self.wparams['buf_maxbytes'], BUF_MINBYTES)
                self.wparams['buf_maxbytes'] = BUF_MINBYTES

        # initialise sockets for communication with workers:
        # dealer distributes new-file offers, router carries per-file replies.
        self.dealer_stream, self.dealer_url = self.init_comm(
            zmq.XREQ, 'inproc://workers-dealer', self.dealer_on_recv)
        self.router_stream, self.router_url = self.init_comm(
            zmq.XREP, 'inproc://workers-router', self.router_on_recv)

        self.launch_workers()

        # Periodic queue flush / idle-file cleanup (period configured in seconds).
        self.timer_maint = PeriodicCallback(
            self.do_maint, self.wparams['maint_period'] * 1000, self.ioloop)
        self.timer_maint.start()

    def init_comm(self, stype, url, cb):
        """ Open a worker-facing socket and wrap it in a receiving stream.

        Binds a socket of type *stype* to a random port under *url*,
        installs *cb* as the receive callback, and returns the stream
        together with the fully-qualified bound address.
        """
        raw_sock = self.zctx.socket(stype)
        bound_port = raw_sock.bind_to_random_port(url)
        full_url = "%s:%d" % (url, bound_port)
        comm_stream = CCStream(raw_sock, self.ioloop)
        comm_stream.on_recv(cb)
        return (comm_stream, full_url)

    def launch_workers(self):
        """ Spawn the configured number of worker threads and start them. """
        count = self.cf.getint('worker-threads', 10)
        for seq in range(count):
            wname = "%s.worker-%i" % (self.hname, seq)
            self.log.info("starting %s", wname)
            worker = TailWriter_Worker(
                wname, self.xtx, self.zctx, self.ioloop,
                self.dealer_url, self.router_url, self.wparams)
            worker.stat_inc = self.stat_inc  # XXX
            self.workers.append(worker)
            worker.start()

    def handle_msg(self, cmsg):
        """ Got message from client, process it.

        Known files are queued (and flushed if a worker has already
        claimed the file); unknown files are offered to the worker pool
        via the dealer socket.
        """
        payload = cmsg.get_payload(self.xtx)
        if not payload:
            return

        fname = payload['filename']
        ident = (payload['hostname'], payload.get('st_dev'),
                 payload.get('st_ino'), fname)

        if ident not in self.files:
            # First sight of this file: track it and offer to any worker.
            self.files[ident] = FileState(ident, 1)
            self.log.trace("offering %r", fname)
            self.dealer_stream.send_cmsg(cmsg)
            return

        state = self.files[ident]
        if state.waddr:  # already accepted ?
            self.log.trace("passing %r to %s", fname, state.wname)
            state.queue.append(cmsg)
            state.send_to(self.router_stream)
        else:
            # Worker has not claimed the file yet; hold the message.
            self.log.trace("queueing %r", fname)
            state.queue.append(cmsg)

    def dealer_on_recv(self, zmsg):
        """ Got reply from worker via "dealer" connection

        Workers are expected to answer on the router socket, so anything
        arriving here is unexpected and only logged.
        """
        self.log.warning("reply via dealer: %s", zmsg)

    def router_on_recv(self, zmsg):
        """ Got reply from worker via "router" connection

        Binds a file to the replying worker on first contact, then
        bookkeeps activity time and the outstanding-message count.
        """
        reply = CCMessage(zmsg)
        info = reply.get_payload(self.xtx)
        key = (info['d_hostname'], info['d_st_dev'],
               info['d_st_ino'], info['d_filename'])
        state = self.files[key]
        worker_addr = zmsg[0]
        if state.waddr is None:
            # First reply for this file: remember which worker owns it.
            state.waddr = worker_addr
            state.wname = info['worker']
        else:
            # All later replies must come from the same worker.
            assert state.waddr == worker_addr and state.wname == info['worker']
        state.atime = time.time()
        state.count -= 1
        assert state.count >= 0

    def do_maint(self):
        """ Check & flush queues; drop inactive files.

        Runs periodically: flushes pending messages for files already
        claimed by a worker, and forgets files that have been idle with
        no outstanding messages.
        """
        self.log.trace('cleanup')
        now = time.time()
        expired = []
        for key, state in self.files.iteritems():
            if state.queue and state.waddr:
                self.log.trace("passing %r to %s", state.ident, state.wname)
                state.send_to(self.router_stream)
            idle_for = now - state.atime
            # you'd better use msg for this
            if state.count == 0 and idle_for > 2 * CLOSE_DELAY:
                self.log.debug("forgetting %r", state.ident)
                expired.append(key)
        # Remove outside the iteration to avoid mutating the dict mid-loop.
        for key in expired:
            self.files.pop(key)

    def stop(self):
        """ Signal workers to shut down.

        Stops the base handler and the maintenance timer, then asks each
        worker thread to stop.
        """
        super(TailWriter, self).stop()
        self.log.info('stopping')
        self.timer_maint.stop()
        for worker in self.workers:
            self.log.info("signalling %s", worker.name)
            worker.stop()