Example #1
class Client(object):
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.engine.stop()

    def __init__(self, channel=ZYRE_CHANNEL, *args, **kvargs):

        self.logger = logging.getLogger('pyre')
        self.channel = channel
        self.engine = Pyre(self.channel)
        self.id = self.engine.uuid()

    def start(self, ctx, pipe):
        self.logger.info('joining channel')
        self.engine.join(self.channel)

        self.logger.info('starting engine...')
        self.engine.start()

        self.logger.info('id is: {}'.format(self.id))

        poller = zmq.Poller()
        poller.register(pipe, zmq.POLLIN)
        poller.register(self.engine.socket(), zmq.POLLIN)

        while True:
            items = dict(poller.poll())
            if pipe in items and items[pipe] == zmq.POLLIN:
                message = pipe.recv()

                # message to quit
                if message.decode('utf-8') == "$$STOP":
                    break

                self.logger.info("CHAT_TASK: %s" % message)
                self.engine.shouts(self.channel, message.decode('utf-8'))
            else:
                cmds = self.engine.recv()
                self.logger.info('HMMM {}'.format(cmds))

                msg_type = cmds.pop(0)
                self.logger.info("NODE_MSG TYPE: %s" % msg_type)
                self.logger.info("NODE_MSG PEER: %s" %
                                 uuid.UUID(bytes=cmds.pop(0)))
                self.logger.info("NODE_MSG NAME: %s" % cmds.pop(0))

                if msg_type.decode('utf-8') == "SHOUT":
                    self.logger.info("NODE_MSG GROUP: %s" % cmds.pop(0))
                elif msg_type.decode('utf-8') == "ENTER":
                    headers = json.loads(cmds.pop(0).decode('utf-8'))
                    self.logger.info("NODE_MSG HEADERS: %s" % headers)

                    for key in headers:
                        self.logger.info("key = {0}, value = {1}".format(
                            key, headers[key]))

                self.logger.info("NODE_MSG CONT: %s" % cmds)

        self.engine.stop()
Example #3
    def __init__(self, name, state: State, node: Pyre, log=None, messageBoard=None):
        if log is None:
            log = []
        if messageBoard is None:
            messageBoard = MemoryBoard()

        super().__init__(node.uuid().hex, state, log, messageBoard, [])
        self._node = node
        self._human_name = name
Example #4
    def gaze_exchange_task(self, ctx, pipe):
        """
        Task for exchanging messages
        Args:
            ctx(zmq.Context): the zmq context
            pipe(zmq.PAIR pipe): the pipe for exchanging messages
        Returns: (zmq.PAIR pipe) the pipe
        """
        n = Pyre("GAZE_EXCHANGE")
        self.publisher_id = n.uuid()
        n.join(GROUP_GAZE_EXCHANGE)
        n.start()

        poller = zmq.Poller()
        # noinspection PyUnresolvedReferences
        poller.register(pipe, zmq.POLLIN)
        # noinspection PyUnresolvedReferences
        poller.register(n.socket(), zmq.POLLIN)
        while not self.stopped:
            items = dict(poller.poll())
            print(n.socket(), items)
            # noinspection PyUnresolvedReferences
            if pipe in items and items[pipe] == zmq.POLLIN:
                message = pipe.recv()
                # message to quit
                message = message.decode('utf-8')
                if message == STOP_MESSAGE:
                    break
                print("GAZE_EXCHANGE_TASK: {}".format(message))
                self.save_gaze_from_message(message)
                n.shouts(GROUP_GAZE_EXCHANGE, message)
            else:
                cmds = n.recv()
                msg_type = cmds.pop(0)
                print("NODE_MSG TYPE: %s" % msg_type)
                print("NODE_MSG PEER: %s" % uuid.UUID(bytes=cmds.pop(0)))
                print("NODE_MSG NAME: %s" % cmds.pop(0))
                if msg_type.decode('utf-8') == "SHOUT":
                    print("NODE_MSG GROUP: %s" % cmds.pop(0))
                    message = cmds.pop(0).decode("utf-8")
                    self.save_gaze_from_message(message)
                elif msg_type.decode('utf-8') == "ENTER":
                    headers = json.loads(cmds.pop(0).decode('utf-8'))
                    print("NODE_MSG HEADERS: %s" % headers)
                    for key in headers:
                        print("key = {0}, value = {1}".format(
                            key, headers[key]))
                print("NODE_MSG CONT: %s" % cmds)
        n.stop()
Example #5
def rethinkdb_writer(ctx, pipe):

    # Database setup
    with open('../configuration.json') as data_file:
        configuration = json.load(data_file)
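    # The layout of configuration.json is not shown in this example; judging from
    # the keys read below, it needs at least something like this (inferred sketch):
    #
    #     {
    #         "zyreMediator": {"name": "...", "group": "...", "type": "..."},
    #         "database": {"mongoDB": {"host": "localhost", "port": 27017}}
    #     }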

    group_name = configuration['zyreMediator']['group']

    n = Pyre(configuration['zyreMediator']['name'])
    n.set_interface('usb0')
    n.set_header('TYPE', configuration['zyreMediator']['type'])
    n.join(group_name)
    n.start()

    # Zyre setup
    poller = zmq.Poller()
    poller.register(pipe, zmq.POLLIN)
    poller.register(n.inbox, zmq.POLLIN)

    database_configuration = configuration['database']['mongoDB']
    mongo_connection = MongoClient(database_configuration['host'], database_configuration['port'])

    meteor = mongo_connection['meteor']

    # Add this module to the database
    meteor['modules'].insert([{
        '_id': str(n.uuid()),
        'name': n.name(),
        'type': configuration['zyreMediator']['type'],
        'parent': None
    }])

    ready_message = {
        'type': 'state',
        'senderId': str(n.uuid()),
        'payload': 2
    }

    def logMessage(message_to_log):
        message_to_log['timestamp'] = datetime.datetime.utcnow()
        meteor['events'].insert_one(message_to_log)
        del message_to_log['timestamp']
        del message_to_log['_id']

    n.shout(group_name, json.dumps(ready_message))
    logMessage(ready_message)

    module_name_to_uid_map = {}

    while True:
        items = dict(poller.poll(10))

        if pipe in items and items[pipe] == zmq.POLLIN:
            message = pipe.recv()
            # message to quit
            if message.decode('utf-8') == '$$STOP':
                break

        if n.inbox in items and items[n.inbox] == zmq.POLLIN:

            msg_frame = n.recv()
            msg_type = msg_frame.pop(0)
            peer_uid = uuid.UUID(bytes=msg_frame.pop(0))
            peer_name = msg_frame.pop(0)
            print('NODE_MSG TYPE: %s' % msg_type)
            print('NODE_MSG PEER: %s' % str(peer_uid))
            print('NODE_MSG NAME: %s' % peer_name)

            if msg_type.decode('utf-8') == 'ENTER':

                headers = json.loads(msg_frame.pop(0))

                try:
                    module_type = headers['type']
                except KeyError:
                    print("Your header doesn't contain your type of module")
                    module_type = 'unknown'

                try:
                    parent_module_id = headers['parentId']
                except KeyError:
                    print("The header doesn't contain the module's parent id")
                    parent_module_id = None

                # creates an entry with all known information about the robot
                # in the database if the robot is not in the database
                meteor['modules'].insert_one({
                    '_id': str(peer_uid),
                    'name': peer_name,
                    'type': module_type,
                    'parent': parent_module_id
                })

                module_name_to_uid_map[peer_name] = str(peer_uid)

            elif msg_type.decode('utf-8') == 'EXIT':
                meteor['modules'].remove({'_id': str(peer_uid)})

            elif msg_type.decode('utf-8') == 'SHOUT':

                # write message to database
                group = msg_frame.pop(0)
                try:
                    data = json.loads(msg_frame[0])
                except ValueError:
                    data = {}
                    print('Invalid JSON string')

                # print data

                data['senderId'] = str(peer_uid)
                logMessage(data)

            elif msg_type.decode('utf-8') == 'WHISPER':
                # write message to database
                try:
                    data = json.loads(msg_frame[0])
                except ValueError:
                    data = {}
                    print('Invalid JSON string')

                logMessage(data)

    meteor['modules'].remove({'_id': str(n.uuid())})
    n.stop()
Example #6
File: network.py  Project: vedb/pyndsi
class _NetworkNode(NetworkInterface):
    """
    Communication node

    Creates Pyre node and handles all communication.
    """
    def __init__(self,
                 format: DataFormat,
                 context=None,
                 name=None,
                 headers=(),
                 callbacks=()):
        self._name = name
        self._format = format
        self._headers = headers
        self._pyre_node = None
        self._context = context or zmq.Context()
        self._sensors_by_host = {}
        self._callbacks = [self._on_event] + list(callbacks)

    # Public NetworkInterface API

    @property
    def has_events(self) -> bool:
        return self.running and self._pyre_node.socket().get(
            zmq.EVENTS) & zmq.POLLIN

    @property
    def running(self) -> bool:
        return bool(self._pyre_node)

    @property
    def sensors(self) -> typing.Mapping[str, NetworkSensor]:
        sensors = {}
        for sensor in self._sensors_by_host.values():
            sensors.update(sensor)
        return sensors

    @property
    def callbacks(self) -> typing.Iterable[NetworkEventCallback]:
        return self._callbacks

    @callbacks.setter
    def callbacks(self, value: typing.Iterable[NetworkEventCallback]):
        self._callbacks = value

    def start(self):
        # Setup node
        logger.debug("Starting network...")
        self._pyre_node = Pyre(self._name)
        self._name = self._pyre_node.name()
        for header in self._headers:
            self._pyre_node.set_header(*header)
        self._pyre_node.join(self._group)
        self._pyre_node.start()

    def whisper(self, peer, msg_p):
        if self._format == DataFormat.V3:
            return  # no-op
        elif self._format == DataFormat.V4:
            self._pyre_node.whisper(peer, msg_p)
        else:
            raise NotImplementedError()

    def rejoin(self):
        for sensor_uuid, sensor in list(self.sensors.items()):
            self._execute_callbacks({
                "subject": "detach",
                "sensor_uuid": sensor_uuid,
                "sensor_name": sensor["sensor_name"],
                "host_uuid": sensor["host_uuid"],
                "host_name": sensor["host_name"],
            })
        self._pyre_node.leave(self._group)
        self._pyre_node.join(self._group)

    def stop(self):
        logger.debug("Stopping network...")
        self._pyre_node.leave(self._group)
        self._pyre_node.stop()
        self._pyre_node = None

    def handle_event(self):
        if not self.has_events:
            return
        event = PyreEvent(self._pyre_node)
        uuid = event.peer_uuid
        if event.type == "SHOUT" or event.type == "WHISPER":
            try:
                payload = event.msg.pop(0).decode()
                msg = serial.loads(payload)
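                # The two bare key lookups below are deliberate: a message that
                # lacks "subject" or "sensor_uuid" raises KeyError here and is
                # logged as malformed by the except clause below.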
                msg["subject"]
                msg["sensor_uuid"]
                msg["host_uuid"] = event.peer_uuid.hex
                msg["host_name"] = event.peer_name
            except serial.decoder.JSONDecodeError:
                logger.warning('Malformatted message: "{}"'.format(payload))
            except (ValueError, KeyError):
                logger.warning("Malformatted message: {}".format(msg))
            except Exception:
                logger.debug(tb.format_exc())
            else:
                if msg["subject"] == "attach":
                    if self.sensors.get(msg["sensor_uuid"]):
                        # Sensor already attached. Drop event
                        return
                    sensor_type = SensorType.supported_sensor_type_from_str(
                        msg["sensor_type"])
                    if sensor_type is None:
                        logger.debug("Unsupported sensor type: {}".format(
                            msg["sensor_type"]))
                        return
                elif msg["subject"] == "detach":
                    sensor_entry = self.sensors.get(msg["sensor_uuid"])
                    # Check if sensor has been detached already
                    if not sensor_entry:
                        return
                    msg.update(sensor_entry)
                else:
                    logger.debug("Unknown host message: {}".format(msg))
                    return
                self._execute_callbacks(msg)
        elif event.type == "JOIN":
            # possible values for `group_version`
            # - [<unrelated group>]
            # - [<unrelated group>, <unrelated version>]
            # - ['pupil-mobile']
            # - ['pupil-mobile', <version>]
            group_version = event.group.split("-v")
            group = group_version[0]
            version = group_version[1] if len(group_version) > 1 else "0"
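            # e.g. "pupil-mobile-v4".split("-v") -> ["pupil-mobile", "4"], while a
            # plain "pupil-mobile" has no "-v" and falls back to version "0".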

        elif event.type == "EXIT":
            gone_peer = event.peer_uuid.hex
            for host_uuid, sensors in list(self._sensors_by_host.items()):
                if host_uuid != gone_peer:
                    continue
                for sensor_uuid, sensor in list(sensors.items()):
                    self._execute_callbacks({
                        "subject":
                        "detach",
                        "sensor_uuid":
                        sensor_uuid,
                        "sensor_name":
                        sensor["sensor_name"],
                        "host_uuid":
                        host_uuid,
                        "host_name":
                        sensor["host_name"],
                    })
        else:
            logger.debug("Dropping {}".format(event))

    def sensor(
        self,
        sensor_uuid: str,
        callbacks: typing.Iterable[NetworkEventCallback] = ()
    ) -> Sensor:
        try:
            sensor_settings = self.sensors[sensor_uuid].copy()
        except KeyError:
            raise ValueError(
                '"{}" is not an available sensor id.'.format(sensor_uuid))

        sensor_type_str = sensor_settings.pop("sensor_type", "unknown")
        sensor_type = SensorType.supported_sensor_type_from_str(
            sensor_type_str)

        if sensor_type is None:
            raise ValueError('Sensor of type "{}" is not supported.'.format(
                sensor_type_str))

        return Sensor.create_sensor(
            sensor_type=sensor_type,
            format=self._format,
            context=self._context,
            callbacks=callbacks,
            **sensor_settings,
        )

    # Public

    def __str__(self):
        return "<{} {} [{}]>".format(__name__, self._name,
                                     self._pyre_node.uuid().hex)

    # Private

    @property
    def _group(self) -> str:
        return group_name_from_format(self._format)

    def _execute_callbacks(self, event):
        for callback in self.callbacks:
            callback(self, event)

    def _on_event(self, caller, event):
        if event["subject"] == "attach":
            subject_less = event.copy()
            del subject_less["subject"]
            host_uuid = event["host_uuid"]
            host_sensor = {event["sensor_uuid"]: subject_less}
            try:
                self._sensors_by_host[host_uuid].update(host_sensor)
            except KeyError:
                self._sensors_by_host[host_uuid] = host_sensor
            logger.debug(f'Attached {host_uuid}.{event["sensor_uuid"]}')
        elif event["subject"] == "detach":
            for host_uuid, sensors in self._sensors_by_host.items():
                try:
                    del sensors[event["sensor_uuid"]]
                    logger.debug(
                        f'Detached {host_uuid}.{event["sensor_uuid"]}')
                except KeyError:
                    pass
            hosts_to_remove = [
                host_uuid
                for host_uuid, sensors in self._sensors_by_host.items()
                if len(sensors) == 0
            ]
            for host_uuid in hosts_to_remove:
                del self._sensors_by_host[host_uuid]
Example #7
class Bridge(object):
    """docstring for Bridge"""
    def __init__(self, uvc_id):
        super(Bridge, self).__init__()

        self.data_seq = 0
        self.note_seq = 0

        # init capture
        self.cap = uvc.Capture(uvc_id)
        logger.info('Initialised uvc device %s'%self.cap.name)

        # init pyre
        self.network = Pyre(socket.gethostname()+self.cap.name[-4:])
        self.network.start()
        logger.info('Bridging under "%s"'%self.network.name())

        # init sensor sockets
        ctx = zmq.Context()
        generic_url = 'tcp://*:*'
        public_ep   = self.network.endpoint()
        self.note, self.note_url = self.bind(ctx, zmq.PUB , generic_url, public_ep)
        self.data, self.data_url = self.bind(ctx, zmq.PUB , generic_url, public_ep,set_hwm=1)
        self.cmd , self.cmd_url  = self.bind(ctx, zmq.PULL, generic_url, public_ep)

    def loop(self):
        logger.info('Entering bridging loop...')
        self.network.shout('pupil-mobile', self.sensor_attach_json())
        try:
            while True:
                self.poll_network()
                self.poll_cmd_socket()
                self.publish_frame()

        except KeyboardInterrupt:
            pass
        except Exception:
            import traceback
            traceback.print_exc()
        finally:
            self.network.shout('pupil-mobile', json.dumps({
                'subject'   : 'detach',
                'sensor_uuid': self.network.uuid().hex
            }))
            logger.info('Leaving bridging loop...')

    def publish_frame(self):
        frame = self.cap.get_frame_robust()
        now = int(time.time()*1000000)
        index = self.data_seq
        self.data_seq += 1
        self.data_seq %= sequence_limit

        jpeg_buffer = frame.jpeg_buffer
        meta_data = struct.pack('<LLLLQLL', 0x10, frame.width, frame.height, index, now, jpeg_buffer.size, 0)
        self.data.send_multipart([self.network.uuid().hex, meta_data, jpeg_buffer])

    def poll_network(self):
        while has_data(self.network.socket()):
            event = PyreEvent(self.network)
            if event.type == 'JOIN' and event.group == 'pupil-mobile':
                self.network.whisper(event.peer_uuid, self.sensor_attach_json())

    def poll_cmd_socket(self):
        while has_data(self.cmd):
            sensor, cmd_str = self.cmd.recv_multipart()
            try:
                cmd = json.loads(cmd_str)
            except Exception as e:
                logger.debug('Could not parse received cmd: %s'%cmd_str)
            else:
                logger.debug('Received cmd: %s'%cmd)
                if cmd.get('action') == 'refresh_controls':
                    self.publish_controls()
                elif cmd.get('action') == 'set_control_value':
                    val = cmd.get('value', 0)
                    if cmd.get('control_id') == 'CAM_RATE':
                        self.cap.frame_rate = self.cap.frame_rates[val]
                    elif cmd.get('control_id') == 'CAM_RES':
                        self.cap.frame_size = self.cap.frame_sizes[val]
                    self.publish_controls()


    def __del__(self):
        self.note.close()
        self.data.close()
        self.cmd.close()
        self.network.stop()

    def publish_controls(self):
        self.note.send_multipart([
            self.network.uuid().hex,
            self.frame_size_control_json()])
        self.note.send_multipart([
            self.network.uuid().hex,
            self.frame_rate_control_json()])

    def sensor_attach_json(self):
        sensor = {
            "subject"         : "attach",
            "sensor_name"     : self.cap.name,
            "sensor_uuid"     : self.network.uuid().hex,
            "sensor_type"     : 'video',
            "notify_endpoint" : self.note_url,
            "command_endpoint": self.cmd_url,
            "data_endpoint"   : self.data_url
        }
        return json.dumps(sensor)

    def frame_size_control_json(self):
        index = self.note_seq
        self.note_seq += 1
        self.note_seq %= sequence_limit
        curr_fs = self.cap.frame_sizes.index(self.cap.frame_size)
        return json.dumps({
            "subject"         : "update",
            "control_id"      : "CAM_RES",
            "seq"             : index,
            "changes"         : {
                "value"           : curr_fs,
                "dtype"           : 'intmapping',
                "min"             : None,
                "max"             : None,
                "res"             : None,
                "def"             : 0,
                "caption"         : 'Resolution',
                "readonly"        : False,
                "map"             : [{
                    'value'  : idx,
                    'caption': '%ix%i'%fs
                } for idx,fs in enumerate(self.cap.frame_sizes)]
            }
        })

    def frame_rate_control_json(self):
        index = self.note_seq
        self.note_seq += 1
        self.note_seq %= sequence_limit
        curr_fr = self.cap.frame_rates.index(self.cap.frame_rate)
        return json.dumps({
            "subject"         : "update",
            "control_id"      : "CAM_RATE",
            "seq"             : index,
            "changes"         : {
                "value"           : curr_fr,
                "dtype"           : 'intmapping',
                "min"             : None,
                "max"             : None,
                "res"             : None,
                "def"             : 0,
                "caption"         : 'Frame Rate',
                "readonly"        : False,
                "map"             : [{
                    'value'  : idx,
                    'caption': '%.1f Hz'%fr
                } for idx,fr in enumerate(self.cap.frame_rates)]
            }
        })

    def bind(self, ctx, sock_type, url, public_ep, set_hwm=None):
        sock = ctx.socket(sock_type)
        if set_hwm: sock.set_hwm(set_hwm)
        sock.bind(url)
        ep = sock.last_endpoint
        port = ep.split(':')[-1]
        public_ep.split(':')[-1]
        public_addr = public_ep.split(':')[:-1]
        return sock, ':'.join(public_addr+[port])
Example #8
class Bridge(object):
    """docstring for Bridge"""
    def __init__(self, uvc_id):
        super(Bridge, self).__init__()

        self.data_seq = 0
        self.note_seq = 0

        # init capture
        self.cap = uvc.Capture(uvc_id)
        logger.info('Initialised uvc device {}'.format(self.cap.name))

        # init pyre
        self.network = Pyre(socket.gethostname()+self.cap.name[-4:])
        self.network.join(GROUP)
        self.network.start()
        logger.info('Bridging under "{}"'.format(self.network.name()))

        # init sensor sockets
        ctx = zmq.Context()
        generic_url = 'tcp://*:*'
        public_ep = self.network.endpoint()
        self.note, self.note_url = self.bind(ctx, zmq.PUB, generic_url, public_ep)
        self.data, self.data_url = self.bind(ctx, zmq.PUB, generic_url, public_ep, set_hwm=1)
        self.cmd, self.cmd_url = self.bind(ctx, zmq.PULL, generic_url, public_ep)

    def loop(self):
        logger.info('Entering bridging loop...')
        self.network.shout(GROUP, self.sensor_attach_json().encode())
        try:
            while True:
                self.poll_network()
                self.poll_cmd_socket()
                self.publish_frame()

        except KeyboardInterrupt:
            pass
        except Exception:
            import traceback
            traceback.print_exc()
        finally:
            self.network.shout(GROUP, json.dumps({
                'subject': 'detach',
                'sensor_uuid': self.network.uuid().hex
            }).encode())
            logger.info('Leaving bridging loop...')

    def publish_frame(self):
        frame = self.cap.get_frame_robust()
        now = time.time()
        index = self.data_seq
        self.data_seq += 1
        self.data_seq %= sequence_limit

        jpeg_buffer = frame.jpeg_buffer
        m = hashlib.md5(jpeg_buffer)
        lower_end = int(m.hexdigest(), 16) % 0x100000000
        meta_data = struct.pack('<LLLLdLL', 0x10, frame.width, frame.height, index, now, jpeg_buffer.size, lower_end)
        self.data.send_multipart([self.network.uuid().hex.encode(), meta_data, jpeg_buffer])

    def poll_network(self):
        for event in self.network.recent_events():
            if event.type == 'JOIN' and event.group == GROUP:
                self.network.whisper(event.peer_uuid, self.sensor_attach_json().encode())

    def poll_cmd_socket(self):
        while has_data(self.cmd):
            sensor, cmd_str = self.cmd.recv_multipart()
            try:
                cmd = json.loads(cmd_str.decode())
            except Exception as e:
                logger.debug('Could not parse received cmd: {}'.format(cmd_str))
            else:
                logger.debug('Received cmd: {}'.format(cmd))
                if cmd.get('action') == 'refresh_controls':
                    self.publish_controls()
                elif cmd.get('action') == 'set_control_value':
                    val = cmd.get('value', 0)
                    if cmd.get('control_id') == 'CAM_RATE':
                        self.cap.frame_rate = self.cap.frame_rates[val]
                    elif cmd.get('control_id') == 'CAM_RES':
                        self.cap.frame_size = self.cap.frame_sizes[val]
                    self.publish_controls()

    def __del__(self):
        self.note.close()
        self.data.close()
        self.cmd.close()
        self.network.stop()

    def publish_controls(self):
        self.note.send_multipart([
            self.network.uuid().hex.encode(),
            self.frame_size_control_json().encode()])
        self.note.send_multipart([
            self.network.uuid().hex.encode(),
            self.frame_rate_control_json().encode()])

    def sensor_attach_json(self):
        sensor = {
            "subject": "attach",
            "sensor_name": self.cap.name,
            "sensor_uuid": self.network.uuid().hex,
            "sensor_type": 'video',
            "notify_endpoint": self.note_url,
            "command_endpoint": self.cmd_url,
            "data_endpoint": self.data_url
        }
        return json.dumps(sensor)

    def frame_size_control_json(self):
        index = self.note_seq
        self.note_seq += 1
        self.note_seq %= sequence_limit
        curr_fs = self.cap.frame_sizes.index(self.cap.frame_size)
        return json.dumps({
            "subject": "update",
            "control_id": "CAM_RES",
            "seq": index,
            "changes": {
                "value": curr_fs,
                "dtype": 'intmapping',
                "min": None,
                "max": None,
                "res": None,
                "def": 0,
                "caption": 'Resolution',
                "readonly": False,
                "map": [{
                    'value': idx,
                    'caption': '{:d}x{:d}'.format(*fs)
                } for idx, fs in enumerate(self.cap.frame_sizes)]
            }
        })

    def frame_rate_control_json(self):
        index = self.note_seq
        self.note_seq += 1
        self.note_seq %= sequence_limit
        curr_fr = self.cap.frame_rates.index(self.cap.frame_rate)
        return json.dumps({
            "subject": "update",
            "control_id": "CAM_RATE",
            "seq": index,
            "changes": {
                "value": curr_fr,
                "dtype": 'intmapping',
                "min": None,
                "max": None,
                "res": None,
                "def": 0,
                "caption": 'Frame Rate',
                "readonly": False,
                "map": [{
                    'value': idx,
                    'caption': '{:.1f} Hz'.format(fr)
                } for idx, fr in enumerate(self.cap.frame_rates)]
            }
        })

    def bind(self, ctx, sock_type, url, public_ep, set_hwm=None):
        sock = ctx.socket(sock_type)
        if set_hwm:
            sock.set_hwm(set_hwm)
        sock.bind(url)
        ep = sock.last_endpoint.decode()
        port = ep.split(':')[-1]
        public_ep.split(':')[-1]
        public_addr = public_ep.split(':')[:-1]
        return sock, ':'.join(public_addr+[port])
Example #9
class Network:
    hosting: bool = False

    def __init__(self):
        self.open()

    def get_all_groups(self) -> List[str]:
        """Get the names of groups that can be joined."""
        groups = []
        for peer in self.node.peers():
            group = self.node.peer_header_value(peer, 'hosting')
            if group is not None and len(group) > 0:
                groups.append(group)
        return groups

    def get_our_group(self) -> Union[str, None]:
        """What is the name of the group we're in or hosting?"""
        our_groups = self.node.own_groups()
        our_groups.remove('untangled2018')
        if len(our_groups) == 0:
            return None
        return our_groups[0]

    def is_in_group(self) -> bool:
        """Are we in or hosting a group?"""
        return self.get_our_group() is not None

    def is_hosting(self) -> bool:
        """Are we hosting?"""
        return self.hosting

    def join_group(self, group: str) -> None:
        """Join a group of given name (assumes you are not in a group already)."""
        if group not in self.get_all_groups():
            raise ValueError('Group named "{}" does not exist'.format(group))

        if self.is_in_group():
            raise ValueError(
                'You must leave the previous group before you join another')

        self.node.join(group)

    def leave_group(self) -> None:
        """Leave any joined group or stop hosting."""
        self.hosting = False

        if not self.is_in_group():
            raise ValueError('You are not in a group')

        for group in self.node.own_groups():
            self.node.leave(group)

    def host_group(self, name: str) -> None:
        """Host a group of given name."""
        if name in self.get_all_groups():
            raise ValueError('A group of the given name already exists')

        if self.is_in_group():
            raise ValueError('Cannot host whilst in a group')

        self.node.set_header('hosting', name)
        self.hosting = True
        self.node.join(name)

    def open(self) -> None:
        """Create a new pyre instance and join untangled."""
        self.node = Pyre()
        self.node.start()
        self.node.join('untangled2018')
        # used to get our messages
        self.poller = zmq.Poller()
        self.poller.register(self.node.socket(), zmq.POLLIN)

    def get_id(self) -> str:
        """Get our id, as a unique node on the network."""
        return self.node.uuid()

    def is_me(self, player_id) -> bool:
        """See if a given id is ours."""
        return self.get_id() == player_id

    def close(self) -> None:
        """Disconnect from everything"""
        self.node.stop()

    def get_messages(self):
        """See what has been sent to us: who has joined, what have people said, etc"""
        # what has changed
        changes = dict(self.poller.poll(0))

        # are these the changes we subscribed for
        if self.node.socket() in changes and changes[
                self.node.socket()] == zmq.POLLIN:
            msgs = self.node.recent_events()
            return msgs

        # nothing to return
        return []

    def pull_game(self, game):
        """Update our game state based on what other people tell us."""
        for msg in self.get_messages():
            # is it relevant to us?
            if msg.group != self.get_our_group():
                continue
            if msg.type == 'SHOUT':
                entities = bson.loads(msg.msg[0])

                if 'ids' in entities:
                    keys = entities['ids']
                    cur_keys = list(game.entities.keys())
                    diff = list(set(cur_keys) - set(keys))
                    for key in diff:
                        del game.entities[key]

                entities = entities['components']
                for key, changed_comps in entities.items():
                    key = uuid.UUID(key)
                    if key not in game.entities:
                        game.entities[key] = {}
                    entity = game.entities[key]
                    for compname, component in changed_comps.items():
                        try:
                            clas = components.__dict__[compname]
                            if clas in entity:
                                entity[clas] = entity[clas].replace(
                                    **component)
                            else:
                                entity[clas] = clas(**component)
                            entity[clas].observed_changes()
                        except Exception:
                            print(
                                'Error updating component, is everyone in the group on the same version?',
                                file=sys.stdout)
            elif self.is_hosting():
                if msg.type == 'JOIN':
                    game.on_player_join(msg.peer_uuid)
                    self.push_game(game, initial=True)
                elif msg.type == 'EXIT' or msg.type == "LEAVE":
                    game.on_player_quit(msg.peer_uuid)

    def push_game(self, game, initial=False):
        """Tell others how we've changed the game state."""
        if len(self.node.peers_by_group(self.get_our_group())) == 0:
            # Nobody else to talk to
            return

        entities = {'components': {}}
        if self.is_hosting():
            entities = {'ids': [], 'components': {}}

        for key, entity in game.entities.items():
            changed_comps = {}
            for component in entity.values():
                if component.is_networked() and (initial
                                                 or component.has_changed()):
                    changed_comps[component.get_name()] = component.as_dict()
                    component.observed_changes()
            if 'ids' in entities:
                entities['ids'].append(key)
            entities['components'][str(key)] = changed_comps
        self.node.shout(self.get_our_group(), bson.dumps(entities))
Example #10
class Bridge(object):
    """docstring for Bridge"""
    def __init__(self, uvc_id):
        super(Bridge, self).__init__()

        self.data_seq = 0
        self.note_seq = 0

        # init capture
        self.cap = uvc.Capture(uvc_id)
        logger.info("Initialised uvc device {}".format(self.cap.name))

        # init pyre
        self.network = Pyre(socket.gethostname() + self.cap.name[-4:])
        self.network.join(GROUP)
        self.network.start()
        logger.info('Bridging under "{}"'.format(self.network.name()))

        # init sensor sockets
        ctx = zmq.Context()
        generic_url = "tcp://*:*"
        public_ep = self.network.endpoint()
        self.note, self.note_url = self.bind(ctx, zmq.PUB, generic_url,
                                             public_ep)
        self.data, self.data_url = self.bind(ctx,
                                             zmq.PUB,
                                             generic_url,
                                             public_ep,
                                             set_hwm=1)
        self.cmd, self.cmd_url = self.bind(ctx, zmq.PULL, generic_url,
                                           public_ep)

    def loop(self):
        logger.info("Entering bridging loop...")
        self.network.shout(GROUP, self.sensor_attach_json().encode())
        try:
            while True:
                self.poll_network()
                self.poll_cmd_socket()
                self.publish_frame()

        except KeyboardInterrupt:
            pass
        except Exception:
            import traceback

            traceback.print_exc()
        finally:
            self.network.shout(
                GROUP,
                json.dumps({
                    "subject": "detach",
                    "sensor_uuid": self.network.uuid().hex
                }).encode(),
            )
            logger.info("Leaving bridging loop...")

    def publish_frame(self):
        frame = self.cap.get_frame_robust()
        now = time.time()
        index = self.data_seq
        self.data_seq += 1
        self.data_seq %= sequence_limit

        jpeg_buffer = frame.jpeg_buffer
        m = hashlib.md5(jpeg_buffer)
        lower_end = int(m.hexdigest(), 16) % 0x100000000
        meta_data = struct.pack(
            "<LLLLdLL",
            0x10,
            frame.width,
            frame.height,
            index,
            now,
            jpeg_buffer.size,
            lower_end,
        )
        self.data.send_multipart(
            [self.network.uuid().hex.encode(), meta_data, jpeg_buffer])

    def poll_network(self):
        for event in self.network.recent_events():
            if event.type == "JOIN" and event.group == GROUP:
                self.network.whisper(event.peer_uuid,
                                     self.sensor_attach_json().encode())

    def poll_cmd_socket(self):
        while has_data(self.cmd):
            sensor, cmd_str = self.cmd.recv_multipart()
            try:
                cmd = json.loads(cmd_str.decode())
            except Exception as e:
                logger.debug(
                    "Could not parse received cmd: {}".format(cmd_str))
            else:
                logger.debug("Received cmd: {}".format(cmd))
                if cmd.get("action") == "refresh_controls":
                    self.publish_controls()
                elif cmd.get("action") == "set_control_value":
                    val = cmd.get("value", 0)
                    if cmd.get("control_id") == "CAM_RATE":
                        self.cap.frame_rate = self.cap.frame_rates[val]
                    elif cmd.get("control_id") == "CAM_RES":
                        self.cap.frame_size = self.cap.frame_sizes[val]
                    self.publish_controls()

    def __del__(self):
        self.note.close()
        self.data.close()
        self.cmd.close()
        self.network.stop()

    def publish_controls(self):
        self.note.send_multipart([
            self.network.uuid().hex.encode(),
            self.frame_size_control_json().encode()
        ])
        self.note.send_multipart([
            self.network.uuid().hex.encode(),
            self.frame_rate_control_json().encode()
        ])

    def sensor_attach_json(self):
        sensor = {
            "subject": "attach",
            "sensor_name": self.cap.name,
            "sensor_uuid": self.network.uuid().hex,
            "sensor_type": "video",
            "notify_endpoint": self.note_url,
            "command_endpoint": self.cmd_url,
            "data_endpoint": self.data_url,
        }
        return json.dumps(sensor)

    def frame_size_control_json(self):
        index = self.note_seq
        self.note_seq += 1
        self.note_seq %= sequence_limit
        curr_fs = self.cap.frame_sizes.index(self.cap.frame_size)
        return json.dumps({
            "subject": "update",
            "control_id": "CAM_RES",
            "seq": index,
            "changes": {
                "value":
                curr_fs,
                "dtype":
                "intmapping",
                "min":
                None,
                "max":
                None,
                "res":
                None,
                "def":
                0,
                "caption":
                "Resolution",
                "readonly":
                False,
                "map": [{
                    "value": idx,
                    "caption": "{:d}x{:d}".format(*fs)
                } for idx, fs in enumerate(self.cap.frame_sizes)],
            },
        })

    def frame_rate_control_json(self):
        index = self.note_seq
        self.note_seq += 1
        self.note_seq %= sequence_limit
        curr_fr = self.cap.frame_rates.index(self.cap.frame_rate)
        return json.dumps({
            "subject": "update",
            "control_id": "CAM_RATE",
            "seq": index,
            "changes": {
                "value":
                curr_fr,
                "dtype":
                "intmapping",
                "min":
                None,
                "max":
                None,
                "res":
                None,
                "def":
                0,
                "caption":
                "Frame Rate",
                "readonly":
                False,
                "map": [{
                    "value": idx,
                    "caption": "{:.1f} Hz".format(fr)
                } for idx, fr in enumerate(self.cap.frame_rates)],
            },
        })

    def bind(self, ctx, sock_type, url, public_ep, set_hwm=None):
        sock = ctx.socket(sock_type)
        if set_hwm:
            sock.set_hwm(set_hwm)
        sock.bind(url)
        ep = sock.last_endpoint.decode()
        port = ep.split(":")[-1]
        public_ep.split(":")[-1]
        public_addr = public_ep.split(":")[:-1]
        return sock, ":".join(public_addr + [port])
Example #11
class Transport():
    '''Message transport mechanisms for LCAS'''

    def send(self, dest, ntuple):
        '''Send given ntuple to Transport named dest. If dest isn't listening for messages from this Transport, the message will (currently) be silently ignored.'''
        if self._prefix is not None:
            dest = self._prefix + dest
        self._pyre.shout(dest, json.dumps(ntuple).encode('utf-8'))
    # send()
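    # Illustrative use (the destination name and payload are hypothetical):
    #
    #     transport.send('logger', ('temperature', 21.5))
    #
    # which JSON-encodes the tuple and shouts it on the channel named 'logger'
    # (with the federation prefix applied, if one was set).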

    # Notes on subscribe
    #
    # The callback is called in the same thread that listens for pyre
    # messages, so the callback should start a new thread if it's
    # going to block or take a long time to run.
    #
    # The callback must take one positional argument, the tuple, and
    # can OPTIONALLY take a keyword argument (e.g. **kw). I use the
    # inspect module to detect this. May be too clever for my own
    # good.
    #
    # There can be only one callback for a given remote. If you call
    # subscribe again with the same remote, it raises an error.

    def subscribe(self, remote, callback):
        '''When a message is sent from a Transport named remote to this transport, call the passed callback with the ntuple as the first argument. If the callback takes **kw, it will also pass additional metadata such as the Transport name, UUID, and IP of the sender.'''
        if self._prefix is not None:
            remote = self._prefix + remote
        if remote in self._subscribers:
            raise TransportError(self, 'Transport.subscribe() was called a second time with the same remote (\"%s\"). You must call Transport.unsubscribe() before setting a new callback.'%(remote))
        self._subscribers[remote] = callback
    # subscribe()
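    # Callbacks satisfying the contract described above (illustrative only; the
    # remote name is hypothetical):
    #
    #     def on_tuple(ntuple):             # positional argument only
    #         print('received', ntuple)
    #
    #     def on_tuple_meta(ntuple, **kw):  # also gets uuid, name, ip, datetime
    #         print('received', ntuple, 'from', kw['name'], kw['ip'])
    #
    #     transport.subscribe('sensor01', on_tuple_meta)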

    def unsubscribe(self, remote):
        '''Stop listening for messages from remote.'''
        if self._prefix is not None:
            remote = self._prefix + remote
        if remote in self._subscribers:
            del self._subscribers[remote]
    # unsubscribe()

    def subscribe_all(self, callback):
        '''Call callback every time a message is sent from any remote Transport to this Transport.'''
        if self._subscribe_all is not None:
            raise TransportError(self, 'Transport.subscribe_all() was called a second time. You must call Transport.unsubscribe_all() before setting a new callback.')
        self._subscribe_all = callback
    # subscribe_all()

    def unsubscribe_all(self):
        self._subscribe_all = None
    # unsubscribe_all()

    # Notes on get()
    #    
    # If you already subscribe to remote, temporarly overrides
    # the subscribe. The subscribed callback will NOT be called.
    # The subscription is replaced after get() returns.

    def get(self, remote):
        '''Block waiting for a message from a Transport named remote. Returns python namedtuple containing fields object, uuid, name, ip, datetime.'''

        if self._prefix is not None:
            remote = self._prefix + remote

        # The final python namedtuple to be returned needs to be shared
        # between get_callback() and get(). In python3, you can use
        # nonlocal, but in python2 you need a trick (storing in a
        # data structure). The actual value to be returned will
        # be ret[0].
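        # (On Python 3 alone, declaring `nonlocal ret` inside get_callback()
        # would achieve the same effect without the one-element list.)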
        ret = [ None ]

        # The event e will get set when a message is read by the
        # readthread.
        e = threading.Event()

        # This function is a callback used to detect the next message.
        # It stores the message in a Python namedtuple and sets the
        # event.

        def get_callback(tup, **kw):
            ret[0] = collections.namedtuple('TransportEnvelope', ['object', 'uuid', 'name', 'ip', 'datetime'])(tup, kw['uuid'], kw['name'], kw['ip'], kw['datetime'])
            # Inform get() that ret is ready to be returned.
            e.set()
        # get_callback()

        # Store the old callback, if any
        oldcb = self._subscribers.get(remote, None)

        # Set the subscription
        self._subscribers[remote] = get_callback
        
        # Wait for the callback to be called.
        e.wait()

        # Restore the old subscription, if any.
        if oldcb is not None:
            self._subscribers[remote] = oldcb
        else:
            del self._subscribers[remote]

        # Return the namedtuple.
        return ret[0]
    # get()
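    # Illustrative use (the remote name is hypothetical): block until 'camera01'
    # sends us something, then unpack the envelope.
    #
    #     envelope = transport.get('camera01')
    #     print(envelope.object, envelope.name, envelope.ip, envelope.datetime)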

    def quit_federation(self):
        '''Send a quit message to all agents in this federation, and then close down the Transport.'''
        if self._run:
            self._pyre.shouts(self._globalchannel, u"QUIT")
            self._run = False
            # Wait for the readthread to finish
            self._readthread.join()
            # Tell Pyre to shut down
            self._pyre.stop()

    def is_running(self):
        '''Return the status of this Transport. If the Transport isn't running, you should not send it messages and the callbacks will not be called.'''
        return self._run

    ######################################################################
    # All private methods below here

    def __init__(self, myname, port=None, prefix=None):
        # NOTE: Seems to be a bug in Pyre where you can't set the port.
        if port is not None:
            raise NotImplementedError('There is a bug in Pyre that prevents setting of the discovery port. If you require multiple federations of Pyre components, use prefix instead of port in Transport constructor.')

        # dict of remote name to callback. See subscribe method above.
        self._subscribers = {}
        
        # Callback for all message (or None if none registered)
        self._subscribe_all = None

        self._prefix = prefix

        # Attach the federation name as a prefix to both this channel
        # and the global channel. The global channel is currently
        # just used for QUIT messages.

        if prefix is not None:
            myname = prefix + myname
            self._globalchannel = prefix + "GLOBAL"
        else:
            self._globalchannel = "GLOBAL"

        self._pyre = Pyre(myname)
        if port is not None:
            self._pyre.set_port(port)

        self._pyre.join(myname)
        self._pyre.join(self._globalchannel)
        self._pyre.start()

        # Dict of (UUIDs => IP addresses) that have sent a valid ENTER message
        self._uuid2ip = {}

        self._run = True

        self._readthread = threading.Thread(target=self._readworker)
        self._readthread.start()
    # __init__()

    # Handle pyre messages. Run in self._readthread
    def _readworker(self):
        '''This method is called in a separate thread to handle messages sent over pyre. It dispataches to methods named for the pyre events (e.g. _ENTER).'''

        # Set up a poller so recv doesn't block. Possibly not needed
        # since we'll always get an event when the other agents quit,
        # but just in case something goes wrong, we want to be sure to
        # close down.

        poller = zmq.Poller()
        sock = self._pyre.socket()
        poller.register(sock, zmq.POLLIN)

        while self._run:
            # Wait until a message is received OR one second timeout.
            items = dict(poller.poll(1000))
            if not (sock in items and items[sock] == zmq.POLLIN):
                # This should only happen if we time out.
                continue
            # There's an event waiting. Read and process it.
            event = self._pyre.recv()
            logger.debug('Transport %s-%s received event %s'%(self._pyre.uuid(), self._pyre.name(), event))
            eventtype = event[0].decode('utf-8')
            # Sender's uuid and name
            sid = uuid.UUID(bytes=event[1])
            name = event[2].decode('utf-8')
            # Make sure we've seen matching ENTER for all events
            if eventtype != 'ENTER' and sid not in self._uuid2ip:
                raise TransportProtocolError(self, 'Received event %s with no matching ENTER.'%(event))

            if eventtype == 'ENTER':
                # Changed
                url = event[4].decode('utf-8')
                self._ENTER(sid, name, url)
            elif eventtype == 'JOIN':
                channel = event[3].decode('utf-8')
                self._JOIN(sid, name, channel)
            elif eventtype == 'SHOUT':
                channel = event[3].decode('utf-8')
                message = event[4].decode('utf-8')
                if channel == self._globalchannel and message == "QUIT":
                    # Set ourself to stop running, close down pyre, exit
                    # worker thread.
                    self._run = False
                    self._pyre.stop()
                    break
                else:
                    self._SHOUT(sid, name, channel, message)
            elif eventtype == 'WHISPER':
                message = event[3].decode('utf-8')
                self._WHISPER(sid, name, message)
            elif eventtype == 'LEAVE':
                channel = event[3].decode('utf-8')
                self._LEAVE(sid, name, channel)
            elif eventtype == 'EXIT':
                self._EXIT(sid, name)
            else:
                raise TransportProtocolError(self, 'Illegal event type in event %s'%(event))
    # _readworker()

    # The following methods are named for the pyre event that this
    # instance has received. They are called automatically from the
    # worker thread that's listening for events.

    def _ENTER(self, sid, name, url):
        # We expect all connections to be tcp on some port. This regular
        # expression is used to extract the ip part.
        urlmatch = re.match('tcp://([0-9.]+):[0-9]+$', url)
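        # e.g. url = 'tcp://192.168.1.23:49152' gives ip = '192.168.1.23';
        # anything that is not plain tcp://<ipv4>:<port> is rejected below.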
        if urlmatch:
            ip = urlmatch.group(1)
            if is_valid_ip(ip):
                # Everything looks good. Add to list of valid uuids.
                self._uuid2ip[sid] = ip
            else:
                raise TransportSecurityError(self, 'Message from invalid IP address %s in ENTER %s %s %s. Check the function is_valid_ip() in Transport.py.'%(ip, sid, name, url))
        else:
            raise TransportProtocolError(self, 'Malformed URL in ENTER %s %s %s'%(sid, name, url))
    # _ENTER()

    def _JOIN(self, sid, name, channel):
        pass
    # _JOIN()

    def _SHOUT(self, sid, name, channel, message):
        now = datetime.datetime.now()
        logger.debug('In _SHOUT with %s %s %s %s'%(sid, name, channel, message)) #???
        if name in self._subscribers:
            logger.debug('got a subscription')
            cb = self._subscribers[name]
            self._call_callback(cb, sid, name, channel, message, now)
        if self._subscribe_all is not None:
            cb = self._subscribe_all
            self._call_callback(cb, sid, name, channel, message, now)
    # _SHOUT()

    def _call_callback(self, cb, sid, name, channel, message, now):
        if inspect.getargspec(cb).keywords is None:
            cb(json.loads(message))
        else:
            cb(message, uuid=sid, name=name, ip=self._uuid2ip[sid], datetime=now)
    # _call_callback


    def _WHISPER(self, sid, name, message):
        raise TransportProtocolError(self, 'Unexpected WHISPER from %s %s'%(sid, name))
    # _WHISPER()

    def _LEAVE(self, sid, name, channel):
        pass
    # _LEAVE()

    def _EXIT(self, sid, name):
        # Remove sid from list of valid uuids. This should
        # never be an error since we check in _readworker().
        del self._uuid2ip[sid]
Example #12
    def network_manager(self, ctx, write_pipe, node_name, overlay_network_name,
                        read_pipe):
        # create the poller to wait for messages from pipes and network
        poller = zmq.Poller()
        poller.register(write_pipe, zmq.POLLIN)
        # create the Pyre node object
        node = Pyre(node_name + str(uuid.uuid4()))
        # register node to the network, start it and register for events with
        # the poller.
        node.join(overlay_network_name)
        node.start()
        poller.register(node.socket(), zmq.POLLIN)

        while True:
            # do stuff, aka wait and decode messages
            items = dict(poller.poll())

            if write_pipe in items and items[write_pipe] == zmq.POLLIN:
                # here is where the thread receives internal data
                # check if we have to send something outside
                # or eventually die gracefully
                message = write_pipe.recv()
                # here I have a Command + a FardNetworkData object
                decoded_message = pickle.loads(message)

                command = decoded_message[0]
                network_data = decoded_message[1]

                if command == "$$STOP":
                    # message to quit here
                    break
                elif command == "$$GET_PEERS":
                    # the only synchronous command; it retrieves the peers of
                    # the requested group and replies on the same pipe.
                    group = network_data.group
                    peers = node.peers_by_group(group)
                    list_of_peers = []
                    for peer in peers:
                        list_of_peers.append(str(peer))
                    write_pipe.send(pickle.dumps(str(";".join(list_of_peers))))
                elif command == "$$SEND_MESSAGE" in decoded_message:
                    # send message to a single peer using Pyre
                    # if requested, send back the message to the same node that
                    # sent it.
                    peer = network_data.peer
                    network_data.sender_peer = str(node.uuid())
                    network_data.message_type = "peer"
                    node.whisper(uuid.UUID(peer), pickle.dumps(network_data))
                    if network_data.auto_send and peer == network_data.sender_peer:
                        read_pipe.send(pickle.dumps(network_data))
                elif command == "$$SEND_TASK_MESSAGE":
                    # send a message to a group of identical tasks using Pyre.
                    # Currently implemented as a shout that receivers ignore
                    # when the task name differs from their own.
                    # If requested, send the message back to the node that
                    # sent it.
                    # 'group' must be taken from the message here as well;
                    # otherwise it would be unbound (or stale) on this branch.
                    group = network_data.group
                    network_data.sender_peer = str(node.uuid())
                    network_data.message_type = "task"
                    node.shout(group, pickle.dumps(network_data))
                    if network_data.auto_send:
                        read_pipe.send(pickle.dumps(network_data))
                elif command == "$$SEND_GROUP_MESSAGE":
                    # send message to the whole application using Pyre
                    # if requested, send back the message to the same node that
                    # sent it.
                    group = network_data.group
                    network_data.sender_peer = str(node.uuid())
                    network_data.message_type = "group"
                    node.shout(group, pickle.dumps(network_data))
                    if network_data.auto_send:
                        read_pipe.send(pickle.dumps(network_data))
            else:
                # here is where the thread receives data from the outside
                # decode messages and reroute them accordingly
                cmds = node.recv()
                msg_type = cmds.pop(0).decode('utf-8')
                peer_uuid = uuid.UUID(bytes=cmds.pop(0))
                sender_node_name = cmds.pop(0).decode('utf-8')
                if msg_type == "SHOUT":
                    group = cmds.pop(0).decode('utf-8')
                    read_pipe.send(cmds.pop(0))
                elif msg_type == "WHISPER":
                    read_pipe.send(cmds.pop(0))
                # elif msg_type == "ENTER":
                #     headers = json.loads(cmds.pop(0).decode('utf-8'))
                # print("NODE_MSG HEADERS: %s" % headers)
                # for key in headers:
                #     print("key = {0}, value = {1}".format(key, headers[key]))
                # print("NODE_MSG CONT: %s" % cmds)

        node.stop()
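
# A minimal driver sketch for the network_manager above. Everything below is
# illustrative, not part of the original example: 'manager' is assumed to be
# an instance of the (not shown) class that defines network_manager, the
# inproc endpoint name is made up, and a plain SimpleNamespace stands in for
# the project's FardNetworkData object.
import pickle
from types import SimpleNamespace

import zmq
from pyre import zhelper

ctx = zmq.Context()

# PAIR pair on which the manager pushes messages routed in from the network.
read_recv = ctx.socket(zmq.PAIR)
read_recv.bind('inproc://manager-read')
read_send = ctx.socket(zmq.PAIR)
read_send.connect('inproc://manager-read')

# zthread_fork() runs the manager in a thread and hands it its command pipe
# (the 'write_pipe' argument); the socket returned here is our end of it.
pipe = zhelper.zthread_fork(ctx, manager.network_manager,
                            'worker-', 'demo-network', read_send)

# The only synchronous command: ask for the peers of a group, read the reply
# (a ';'-joined string of peer uuids, possibly empty), then shut down.
pipe.send(pickle.dumps(('$$GET_PEERS', SimpleNamespace(group='demo-network'))))
print(pickle.loads(pipe.recv()))
pipe.send(pickle.dumps(('$$STOP', None)))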
Example #13
0
class Transport():
    '''Message transport mechanisms for LCAS'''
    def send(self, dest, ntuple):
        '''Send given ntuple to Transport named dest. If dest isn't listening for messages from this Transport, the message will (currently) be silently ignored.'''
        if self._prefix is not None:
            dest = self._prefix + dest
        self._pyre.shout(dest, json.dumps(ntuple).encode('utf-8'))

    # send()

    # Notes on subscribe
    #
    # The callback is called in the same thread that listens for pyre
    # messages, so the callback should start a new thread if it's
    # going to block or take a long time to run.
    #
    # The callback must take one positional argument, the tuple, and
    # can OPTIONALLY take a keyword argument (e.g. **kw). I use the
    # inspect module to detect this. May be too clever for my own
    # good.
    #
    # There can be only one callback for a given remote. If you call
    # subscribe again with the same remote, it raises an error.
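    # A minimal sketch (names made up here) of the two callback shapes this
    # convention supports:
    #
    #     def on_pose(ntuple):                # positional-only form
    #         print('pose', ntuple)
    #
    #     def on_pose_meta(ntuple, **kw):     # keyword-aware form
    #         print('pose from', kw['name'], 'at', kw['ip'])
    #
    #     transport.subscribe('PoseSender', on_pose_meta)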

    def subscribe(self, remote, callback):
        '''When a message is sent from a Transport named remote to this transport, call the passed callback with the ntuple as the first argument. If the callback takes **kw, it will also pass additional metadata such as the Transport name, UUID, and IP of the sender.'''
        if self._prefix is not None:
            remote = self._prefix + remote
        if remote in self._subscribers:
            raise TransportError(
                self,
                'Transport.subscribe() was called a second time with the same remote (\"%s\"). You must call Transport.unsubscribe() before setting a new callback.'
                % (remote))
        self._subscribers[remote] = callback

    # subscribe()

    def unsubscribe(self, remote):
        '''Stop listening for messages from remote.'''
        if self._prefix is not None:
            remote = self._prefix + remote
        if remote in self._subscribers:
            del self._subscribers[remote]

    # unsubscribe()

    def subscribe_all(self, callback):
        '''Call callback every time a message is sent from any remote Transport to this Transport.'''
        if self._subscribe_all is not None:
            raise TransportError(
                self,
                'Transport.subscribe_all() was called a second time. You must call Transport.unsubscribe_all() before setting a new callback.'
            )
        self._subscribe_all = callback

    # subscribe_all()

    def unsubscribe_all(self):
        self._subscribe_all = None

    # unsubscribe_all()

    # Notes on get()
    #
    # If you are already subscribed to remote, get() temporarily overrides
    # that subscription: the subscribed callback will NOT be called while
    # get() is waiting, and the original subscription is restored after
    # get() returns.

    def get(self, remote):
        '''Block waiting for a message from a Transport named remote. Returns python namedtuple containing fields object, uuid, name, ip, datetime.'''

        if self._prefix is not None:
            remote = self._prefix + remote

        # The final python namedtuple to be returned needs to be shared
        # between get_callback() and get(). In python3, you can use
        # nonlocal, but in python2 you need a trick (storing in a
        # data structure). The actual value to be returned will
        # be ret[0].
        ret = [None]

        # The event e will get set when a message is read by the
        # readthread.
        e = threading.Event()

        # This function is a callback used to detect the next message.
        # It stores the message in a Python namedtuple and sets the
        # event.

        def get_callback(tup, **kw):
            ret[0] = collections.namedtuple(
                'TransportEnvelope',
                ['object', 'uuid', 'name', 'ip', 'datetime'])(tup, kw['uuid'],
                                                              kw['name'],
                                                              kw['ip'],
                                                              kw['datetime'])
            # Inform get() that ret is ready to be returned.
            e.set()

        # get_callback()

        # Store the old callback, if any
        oldcb = self._subscribers.get(remote, None)

        # Set the subscription
        self._subscribers[remote] = get_callback

        # Wait for the callback to be called.
        e.wait()

        # Restore the old subscription, if any.
        if oldcb is not None:
            self._subscribers[remote] = oldcb
        else:
            del self._subscribers[remote]

        # Return the namedtuple.
        return ret[0]

    # get()

    def quit_federation(self):
        '''Send a quit message to all agents in this federation, and then close down the Transport.'''
        if self._run:
            self._pyre.shouts(self._globalchannel, u"QUIT")
            self._run = False
            # Wait for the readthread to finish
            self._readthread.join()
            # Tell Pyre to shut down
            self._pyre.stop()

    def is_running(self):
        '''Return the status of this Transport. If the Transport isn't running, you should not send it messages and the callbacks will not be called.'''
        return self._run

    ######################################################################
    # All private methods below here

    def __init__(self, myname, port=None, prefix=None):
        # NOTE: Seems to be a bug in Pyre where you can't set the port.
        if port is not None:
            raise NotImplementedError(
                'There is a bug in Pyre that prevents setting of the discovery port. If you require multiple federations of Pyre components, use prefix instead of port in Transport constructor.'
            )

        # dict of remote name to callback. See subscribe method above.
        self._subscribers = {}

        # Callback for all messages (or None if none is registered)
        self._subscribe_all = None

        self._prefix = prefix

        # Attach the federation name as a prefix to both this channel
        # and the global channel. The global channel is currently
        # just used for QUIT messages.

        if prefix is not None:
            myname = prefix + myname
            self._globalchannel = prefix + "GLOBAL"
        else:
            self._globalchannel = "GLOBAL"

        self._pyre = Pyre(myname)
        if port is not None:
            self._pyre.set_port(port)

        self._pyre.join(myname)
        self._pyre.join(self._globalchannel)
        self._pyre.start()

        # Dict of (UUIDs => IP addresses) that have sent a valid ENTER message
        self._uuid2ip = {}

        self._run = True

        self._readthread = threading.Thread(target=self._readworker)
        self._readthread.start()

    # __init__()

    # Handle pyre messages. Run in self._readthread
    def _readworker(self):
        '''This method is called in a separate thread to handle messages sent over pyre. It dispatches to methods named for the pyre events (e.g. _ENTER).'''

        # Set up a poller so recv doesn't block. Possibly not needed
        # since we'll always get an event when the other agents quit,
        # but just in case something goes wrong, we want to be sure to
        # close down.

        poller = zmq.Poller()
        sock = self._pyre.socket()
        poller.register(sock, zmq.POLLIN)

        while self._run:
            # Wait until a message is received OR one second timeout.
            items = dict(poller.poll(1000))
            if not (sock in items and items[sock] == zmq.POLLIN):
                # This should only happen if we time out.
                continue
            # There's an event waiting. Read and process it.
            event = self._pyre.recv()
            logger.debug('Transport %s-%s received event %s' %
                         (self._pyre.uuid(), self._pyre.name(), event))
            eventtype = event[0].decode('utf-8')
            # Sender's uuid and name
            sid = uuid.UUID(bytes=event[1])
            name = event[2].decode('utf-8')
            # Make sure we've seen matching ENTER for all events
            if eventtype != 'ENTER' and sid not in self._uuid2ip:
                raise TransportProtocolError(
                    self,
                    'Received event %s with no matching ENTER.' % (event))

            if eventtype == 'ENTER':
                # ENTER events carry the peer's headers at index 3 and its
                # URL at index 4.
                url = event[4].decode('utf-8')
                self._ENTER(sid, name, url)
            elif eventtype == 'JOIN':
                channel = event[3].decode('utf-8')
                self._JOIN(sid, name, channel)
            elif eventtype == 'SHOUT':
                channel = event[3].decode('utf-8')
                message = event[4].decode('utf-8')
                if channel == self._globalchannel and message == "QUIT":
                    # Set ourself to stop running, close down pyre, exit
                    # worker thread.
                    self._run = False
                    self._pyre.stop()
                    break
                else:
                    self._SHOUT(sid, name, channel, message)
            elif eventtype == 'WHISPER':
                message = event[3].decode('utf-8')
                self._WHISPER(sid, name, message)
            elif eventtype == 'LEAVE':
                channel = event[3].decode('utf-8')
                self._LEAVE(sid, name, channel)
            elif eventtype == 'EXIT':
                self._EXIT(sid, name)
            else:
                raise TransportProtocolError(
                    self, 'Illegal event type in event %s' % (event))

    # _readworker()

    # The following methods are named for the pyre event that this
    # instance has received. They are called automatically from the
    # worker thread that's listening for events.

    def _ENTER(self, sid, name, url):
        # We expect all connections to be tcp on some port. This regular
        # expression is used to extract the ip part.
        urlmatch = re.match('tcp://([0-9.]+):[0-9]+$', url)
        if urlmatch:
            ip = urlmatch.group(1)
            if is_valid_ip(ip):
                # Everything looks good. Add to list of valid uuids.
                self._uuid2ip[sid] = ip
            else:
                raise TransportSecurityError(
                    self,
                    'Message from invalid IP address %s in ENTER %s %s %s. Check the function is_valid_ip() in Transport.py.'
                    % (ip, sid, name, url))
        else:
            raise TransportProtocolError(
                self, 'Malformed URL in ENTER %s %s %s' % (sid, name, url))

    # _ENTER()
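    # For example, an ENTER url of 'tcp://192.168.1.7:49154' matches the
    # pattern above and yields ip = '192.168.1.7', while a non-tcp endpoint
    # such as 'ipc:///tmp/node' fails the match and raises
    # TransportProtocolError.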

    def _JOIN(self, sid, name, channel):
        pass

    # _JOIN()

    def _SHOUT(self, sid, name, channel, message):
        now = datetime.datetime.now()
        logger.debug('In _SHOUT with %s %s %s %s' %
                     (sid, name, channel, message))  #???
        if name in self._subscribers:
            logger.debug('got a subscription')
            cb = self._subscribers[name]
            self._call_callback(cb, sid, name, channel, message, now)
        if self._subscribe_all is not None:
            cb = self._subscribe_all
            self._call_callback(cb, sid, name, channel, message, now)

    # _SHOUT()

    def _call_callback(self, cb, sid, name, channel, message, now):
        # getargspec() was removed in Python 3.11; getfullargspec().varkw is
        # the equivalent test for a **kw parameter on the callback.
        if inspect.getfullargspec(cb).varkw is None:
            cb(json.loads(message))
        else:
            # Decode the JSON here too, so both callback shapes receive the
            # ntuple (not the raw message string) as their first argument.
            cb(json.loads(message),
               uuid=sid,
               name=name,
               ip=self._uuid2ip[sid],
               datetime=now)

    # _call_callback

    def _WHISPER(self, sid, name, message):
        raise TransportProtocolError(
            self, 'Unexpected WHISPER from %s %s' % (sid, name))

    # _WHISPER()

    def _LEAVE(self, sid, name, channel):
        pass

    # _LEAVE()

    def _EXIT(self, sid, name):
        # Remove sid from list of valid uuids. This should
        # never be an error since we check in _readworker().
        del self._uuid2ip[sid]
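
# A minimal single-process sketch of the Transport API above (the agent
# names, the prefix and the payload are made up for illustration; in practice
# each Transport would normally live in its own agent process). 'bob'
# subscribes to messages coming from 'alice', 'alice' sends a tuple, and
# 'bob' finally shuts the whole federation down.
import time

alice = Transport('alice', prefix='demo-')
bob = Transport('bob', prefix='demo-')

def on_alice(ntuple, **kw):
    # Keyword-aware callback: also receives uuid, name, ip and datetime.
    print('from', kw['name'], 'at', kw['ip'], ':', ntuple)

bob.subscribe('alice', on_alice)

time.sleep(2.0)              # give the UDP beacons time to discover peers
alice.send('bob', ['pose', 1.0, 2.0, 3.0])

time.sleep(1.0)              # let the read thread deliver the message
bob.quit_federation()        # broadcasts QUIT; both Transports shut down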
Example #14
0
import sys
import json

print('Pyre START')
n = Pyre("acer_node")

# n.set_header("CHAT_Header1","example header 1")
# n.set_header("CHAT_Header2","example header 2")

print('join')
n.join("CHAT")

print('node START')
n.start()

print('uuid: ', n.uuid())
print('name: ', n.name())

#n.set_name('Acer_Node')
#print('name: ', n.name())

# send by name
# n.whisper('9371B3', 'Hello from the Acer_Node')

# print(n.peers())

msg = 'Hello from acer'
dic_msg = {"name": "John", "age": 30, "car": "bmw"}
jmsg = json.dumps(dic_msg).encode('utf-8')
lst_msg = ['physics', 'Biology', 'chemistry', 'maths']
lst_msg = [x.encode('utf-8') for x in lst_msg]
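
# The snippet above only prepares the payloads; a minimal, assumed
# continuation would shout them on the CHAT group and then stop the node
# (the sleep simply gives UDP beacon discovery a moment to find peers).
import time

time.sleep(2.0)
n.shouts("CHAT", msg)                 # plain unicode string
n.shout("CHAT", jmsg)                 # JSON payload as bytes
for frame in lst_msg:                 # already-encoded byte strings
    n.shout("CHAT", frame)
n.stop()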
Example #15
0
class Agent:
    """
    A class object that represents each app in the network"""

    def __init__(self, name, ctx, group_name, cpu_clock_rate, experiment_name):
        self.lock = threading.Lock()
        self.cpu_clock_rate = cpu_clock_rate
        self.cpu_load = random.random()
        self.group_name = group_name
        self.routing_table = None
        self.name = name + str(os.getpid())
        self.tasks = Queue(-1)
        self.results = Queue(-1)
        self.exp_name = experiment_name
        self.task_duration_no_context = random.random()
        # compute duration using cpu load, etc
        self.task_duration_with_context = random.random()
        #self.weights = 'rnn-model-attention-weights.h5'
        #self.model = rnn_model()
        # self.model._make_predict_function()
        # self.model.load_weights(self.weights)
        self.agent = Pyre(
            name=self.name, ctx=ctx or zmq.Context.instance())
        try:
            self.agent.join(group_name)
            self.agent.start()
        except Exception as err:
            logger.error(f">>> Can't start node: {err}", exc_info=True)

    def routing_table_setter(self, table):
        self.lock.acquire()
        try:
            # create an ascending round robin routing principle
            self.routing_table = cycle(
                sorted(table.items(), key=lambda x: x[1]))
        finally:
            self.lock.release()
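        # For example, routing_table_setter({'b': 2.0, 'a': 0.5}) produces an
        # endless iterator over ('a', 0.5), ('b', 2.0), ('a', 0.5), ... i.e.
        # peers in ascending order of latency, visited round-robin.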

    def add_task(self):
        """populates the task queue with new data for inference"""
        logger.debug(f'>>> {threading.current_thread().name} started')
        self.data = cycle(load_data(self.exp_name, 0))
        count = 0
        while count < 100:
            task_dict = dict.fromkeys(
                ['input', 'target', 'task-type', 'task-uuid', 'task-owner-name', 'result', 'duration'], 0)
            try:
                input_data, target_data = next(self.data)
                task_dict['input'] = input_data
                task_dict['target'] = target_data
                task_dict['task-type'] = 1
                task_dict['task-uuid'] = self.agent.uuid()
                task_dict['task-owner-name'] = self.agent.name()
                task_dict['duration'] = time.time()
                self.tasks.put(task_dict)
                count += 1
            except Exception as err:
                logger.error(f'>>> Exception type: {err}', exc_info=True)
                self.agent.leave(self.group_name)
                self.agent.stop()
            # Vary the frequency of input tasks
            time.sleep(random.randint(1, 8))

    def vary_cpu_load(self):
        logger.debug(
            f'>>> {threading.current_thread().name} thread started')
        while True:
            try:
                self.lock.acquire()
                self.cpu_load = random.random()
                self.lock.release()
                self.compute_duration_with_context()
            except Exception as err:
                logger.error(f'>>> Exception: {err}', exc_info=True)
            time.sleep(random.randint(10, 40))

    def compute_duration_with_context(self):
        try:
            self.lock.acquire()
            cpu_load = self.cpu_load
            task_duration_no_context = self.task_duration_no_context
            self.task_duration_with_context = (
                1 / task_duration_no_context) / (cpu_load * self.cpu_clock_rate)
            self.lock.release()
        except Exception as identifier:
            logger.error(f'>>> Exception: {identifier}')
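        # Worked example of the duration model above (illustrative numbers
        # only): with task_duration_no_context = 0.5, cpu_load = 0.25 and
        # cpu_clock_rate = 2.0, the adjusted duration is
        # (1 / 0.5) / (0.25 * 2.0) = 2.0 / 0.5 = 4.0.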

    def compute_local(self, task):
        """argument is task"""
        try:
            task_data = task['input']
            target = task['target']
            uuid = task['task-uuid']
            #predictions = self.model.predict(task_data, verbose=0)
            #predictions = predictions.flatten()
            # flatten the target
            average = mean(task_data.flatten())
            # window = 5
            # errors = self.regression_error(predictions, target, window)
            # mu, variance = np.mean(errors), np.var(errors)
            # probabilities = self.chebyshev_probability(mu, variance, errors)
            task['task-type'] = task['task-type'] + 1
            if uuid == self.agent.uuid():  # put results in our queue if its our uuid
                self.results.put(average)
                self.lock.acquire()
                self.task_duration_no_context = time.time() - task['duration']
                self.lock.release()
                self.compute_duration_with_context()
            else:
                task['result'] = average
                data_byte = pickle.dumps(task, -1)
                self.agent.whisper(uuid, data_byte)
                logger.error(
                    f'>>> Results sent back to task owner peer: {task["task-owner-name"]}')
        except Exception as identifier:
            logger.error(f'>>> Exception type: {identifier}', exc_info=True)
            self.agent.leave(self.group_name)
            self.agent.stop()  # clean up if there are issues.

    def check_results(self):
        logger.error(f'>>> {threading.current_thread().name} thread started')
        while True:
            try:
                if not self.results.empty():
                    result = self.results.get()
                    # Use closed upper bounds so results of exactly 0.5 or
                    # 0.75 do not fall through to the 'Mild' branch.
                    if result <= 0.25:
                        logger.error(
                            f'>>> Critical anomaly detected: {result}')
                    elif 0.25 < result <= 0.5:
                        logger.error(
                            f'>>> Severe anomaly detected: {result}')
                    elif 0.5 < result <= 0.75:
                        logger.error(
                            f'>>> Serious anomaly detected: {result}')
                    else:
                        logger.error(f'>>> Mild anomaly detected: {result}')
            except Exception as err:
                logger.error(f'>>> Exception: {err}', exc_info=True)
                self.agent.leave(self.group_name)
                self.agent.stop()

    def outbox(self, task, peer_uuid):
        try:
            task = pickle.dumps(task, -1)
            self.agent.whisper(peer_uuid, task)
        except Exception as identifier:
            logger.error(f'>>> Exception: {identifier}', exc_info=True)
            self.agent.leave(self.group_name)
            self.agent.stop()

    def num_of_peers(self, table):
        seen = []
        for peer in table:
            if peer[0] in seen:
                return len(seen)
            else:
                seen.append(peer[0])
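        # For a routing table cycled from [('a', 1), ('b', 2)] this revisits
        # ('a', 1) on the third step and returns 2. Note that it advances the
        # shared cycle iterator while counting.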

    def handle_task(self):
        # decide if to compute locally or offload
        logger.error(f'>>> {threading.current_thread().name} thread started')
        while True:
            try:
                if not self.tasks.empty():
                    task = self.tasks.get()
                    # Read the shared state under the lock and release it
                    # straight away: compute_local() and
                    # compute_duration_with_context() re-acquire this same
                    # non-reentrant lock, so holding it here would deadlock.
                    self.lock.acquire()
                    local_duration = self.task_duration_with_context
                    table = self.routing_table
                    self.lock.release()
                    if table:
                        peer = next(table)  # peer = (uuid, latency)
                        if peer[1] < local_duration:
                            self.outbox(task, peer[0])
                            logger.debug('>>> Task offloaded')
                        else:
                            num_of_peers = self.num_of_peers(table)
                            peer = self.search_table(
                                table, num_of_peers, local_duration)
                            if peer:
                                self.outbox(task, peer[0])
                                logger.debug('>>> Task offloaded')
                            else:
                                self.compute_local(task)
                                logger.debug('>>> Task computed locally')
                    else:
                        self.compute_local(task)
                        logger.debug('>>> Task computed locally')
            except Exception as identifier:
                logger.error(
                    f'>>> Exception type : {identifier}', exc_info=True)
                self.agent.leave(self.group_name)
                self.agent.stop()  # stop if there are issues
            time.sleep(random.randint(0, 3))

    def search_table(self, table, num_of_peers, local_dur):
        # Walk at most one full round of the cycled table and return the
        # first peer whose advertised latency beats the local duration,
        # or None if no such peer exists.
        for _ in range(num_of_peers):
            peer = next(table)
            if peer[1] < local_dur:
                return peer
        return None

    def inbox(self):
        logger.error(f'>>> {threading.current_thread().name} thread started')
        try:
            events = self.agent.events()  # works like charm
            while True:
                if events:
                    event = next(events)
                    logger.error(f'>>> MSG TYPE: {event.type}')
                    logger.error(f'>>> Sender Agent Name: {event.peer_name}')
                    if event.type == 'WHISPER':
                        msg = pickle.loads(event.msg[0])
                        if msg['task-type'] == 2:
                            result = msg['result']
                            self.results.put(result)
                        elif msg['task-type'] == 1:  # peer sent us a task to execute
                            self.tasks.put(msg)
                    elif event.type == 'SHOUT':  # message from the Access Point AP
                        msg = pickle.loads(event.msg[0])
                        if msg['msg-type'] == 'REQUEST':
                            msg['uuid'] = self.agent.uuid()
                            self.lock.acquire()
                            msg['processing-time'] = self.task_duration_with_context
                            self.lock.release()
                            msg_b = pickle.dumps(msg, -1)
                            self.agent.whisper(event.peer_uuid, msg_b)
                        elif msg['msg-type'] == 'UPDATE':
                            table = msg['table']
                            own_uuid = self.agent.uuid()
                            if own_uuid in table.keys():
                                # remove our own UUID to avoid offloading to ourselves
                                del table[own_uuid]
                            self.routing_table_setter(table)
        except Exception as identifier:
            logger.error(f'>>> Exception type: {identifier}', exc_info=True)
            self.agent.leave(self.group_name)
            self.agent.stop()  # leave the cluster if you have issues

    # compute the chebyshev probability
    def chebyshev_probability(self, average, varianse, error_val):
        probability = []
        for val in error_val:
            if val - average >= 1:
                prob = varianse / ((val - average)**2)
                probability.append(prob)
        return probability
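        # Illustrative check of the bound above: with varianse = 0.04 and a
        # value 2.0 above the average, the appended probability is
        # 0.04 / 2.0**2 = 0.01, i.e. Chebyshev's sigma^2 / k^2 upper bound.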

    def regression_error(self, outcome, truth, window):
        n_data = len(truth)
        count = 0
        errors = []
        while count + window <= n_data:
            error = [abs(y_pred - y_truth) for y_pred, y_truth in zip(
                outcome[count:count + window], truth[count:count + window])]
            errors.append(np.mean(error))
            count += window
        return errors
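        # For example, with window = 2, outcome = [1, 2, 3, 5] and
        # truth = [1, 1, 3, 3], the per-window errors are mean([0, 1]) = 0.5
        # and mean([0, 2]) = 1.0, so this returns [0.5, 1.0].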

    def run(self):
        # start the threads here
        t1 = threading.Thread(target=self.add_task, name='add task')
        t2 = threading.Thread(target=self.vary_cpu_load, name='vary cpu load')
        t3 = threading.Thread(target=self.check_results, name='check results')
        t4 = threading.Thread(target=self.handle_task, name='handle task')
        t5 = threading.Thread(target=self.inbox, name='inbox')
        threads = [t1, t2, t3, t4, t5]
        try:
            for thread in threads:
                thread.start()
        except Exception as err:
            logger.error(f'>>> Exception: {err}', exc_info=True)
            self.agent.leave(self.group_name)
            self.agent.stop()
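
# A minimal launch sketch for the Agent above; the names and numbers are
# placeholders, not values taken from the original project, and load_data()
# must be able to find data for the given experiment name.
if __name__ == '__main__':
    context = zmq.Context()
    agent = Agent(name='edge-', ctx=context, group_name='offload-demo',
                  cpu_clock_rate=2.4e9, experiment_name='demo-exp')
    agent.run()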