コード例 #1
0
async def thread_main(window: MyGame, loop):
    """Client-side ZeroMQ I/O: push player input, consume game-state updates."""
    context = Context()

    # Subscribe to game-state broadcasts from the server.
    state_sock: Socket = context.socket(zmq.SUB)
    state_sock.connect('tcp://localhost:25000')
    state_sock.subscribe('')

    # Push this player's input events to the server.
    input_sock: Socket = context.socket(zmq.PUSH)
    input_sock.connect('tcp://localhost:25001')

    async def pusher():
        """Push the player's INPUT state 60 times per second"""
        while True:
            payload = dict(counter=1, event=window.player_event.asdict())
            await input_sock.send_json(payload)
            await asyncio.sleep(1 / UPDATE_TICK)

    async def receive_game_state():
        """Apply each incoming game-state snapshot to the window."""
        while True:
            raw = await state_sock.recv_string()
            window.game_state.from_json(raw)
            first_player = window.game_state.player_states[0]
            stamp = time.time()
            window.position_buffer.append((Vec2d(first_player.x, first_player.y), stamp))
            window.t = 0
            window.player_position_snapshot = copy.copy(window.player.position)

    try:
        await asyncio.gather(pusher(), receive_game_state())
    finally:
        state_sock.close(1)
        input_sock.close(1)
        context.destroy(linger=1)
コード例 #2
0
async def main():
    """Server entry point: pull client updates, publish game state."""
    fut = asyncio.Future()  # IGNORE!
    app = App(signal=fut)  # IGNORE!

    gs = GameState(player_states=[PlayerState(speed=150)])

    ctx = Context()  # "Task A" (ZeroMQ)

    # Task B: pull player events from clients.
    pull_sock: Socket = ctx.socket(zmq.PULL)
    pull_sock.bind('tcp://*:25001')
    task_B = create_task(update_from_client(gs, pull_sock))

    # Task C: broadcast the game state to all clients.
    pub_sock: Socket = ctx.socket(zmq.PUB)
    pub_sock.bind('tcp://*:25000')
    task_C = create_task(push_game_state(gs, pub_sock))

    try:
        # Stop as soon as either worker task finishes or the app signals.
        await asyncio.wait([task_B, task_C, fut],
                           return_when=asyncio.FIRST_COMPLETED)
    except CancelledError:
        print('Cancelled')
    finally:
        pull_sock.close(1)
        pub_sock.close(1)
        ctx.destroy(linger=1)
コード例 #3
0
async def main():
    """Run the game ticker until it finishes or the app signals shutdown."""
    fut = asyncio.Future()  # completed externally to request shutdown
    app = App(signal=fut)
    ctx = Context()

    sock_push_gamestate: Socket = ctx.socket(zmq.PUB)
    sock_push_gamestate.bind('tcp://*:25000')

    sock_recv_player_evts: Socket = ctx.socket(zmq.PULL)
    sock_recv_player_evts.bind('tcp://*:25001')

    ticker_task = asyncio.create_task(
        ticker(sock_push_gamestate, sock_recv_player_evts),
    )
    try:
        await asyncio.wait(
            [ticker_task, fut],
            return_when=asyncio.FIRST_COMPLETED
        )
    except CancelledError:
        print('Cancelled')
    finally:
        ticker_task.cancel()
        try:
            # BUG FIX: awaiting the just-cancelled task re-raises
            # CancelledError out of this finally block, skipping the
            # socket/context cleanup below and masking the normal return.
            await ticker_task
        except CancelledError:
            pass
        sock_push_gamestate.close(1)
        sock_recv_player_evts.close(1)
        ctx.destroy(linger=1000)
コード例 #4
0
async def iomain(window: MyGame, loop):
    """Client-side I/O loop: send player input, receive game-state snapshots."""
    ctx = Context()

    sub_sock: Socket = ctx.socket(zmq.SUB)
    sub_sock.connect('tcp://localhost:25000')
    sub_sock.subscribe('')  # Required for PUB+SUB

    push_sock: Socket = ctx.socket(zmq.PUSH)
    push_sock.connect('tcp://localhost:25001')

    async def send_player_input():
        """ Task A """
        while True:
            payload = dict(event=asdict(window.player_input))
            await push_sock.send_json(payload)
            await asyncio.sleep(1 / UPDATE_TICK)

    async def receive_game_state():
        """ Task B """
        while True:
            snapshot = await sub_sock.recv_string()
            window.game_state.from_json(snapshot)
            first_player = window.game_state.player_states[0]
            point = Vec2d(first_player.x, first_player.y)
            window.position_buffer.append((point, time.time()))
            window.time_since_state_update = 0
            window.player_position_snapshot = copy.copy(window.player.position)

    try:
        await asyncio.gather(send_player_input(), receive_game_state())
    finally:
        sub_sock.close(1)
        push_sock.close(1)
        ctx.destroy(linger=1)
コード例 #5
0
class Server:
    """ZeroMQ command server: REP socket for requests, PUB socket for events."""

    # Monotonically increasing request id shared by all instances, used to
    # correlate log lines belonging to one request.
    transaction = 0

    def __init__(self):
        self.logger = getLogger(__name__)
        self.context = Context()
        self.pub_sock = self.context.socket(PUB)
        self.pub_sock.bind("tcp://127.0.0.1:40086")
        self.running = True
        self.handlers = {}  # action name -> async handler

    async def recv_rep_and_process(self):
        """Serve JSON requests on a REP socket until stop() is called."""
        rep_sock = self.context.socket(REP)
        rep_sock.bind("tcp://127.0.0.1:40085")
        while self.running:
            # Short poll timeout so a stop() request is noticed promptly.
            if await rep_sock.poll(timeout=10) and self.running:
                msg = await rep_sock.recv_json()
                resp = await self.handle_req(msg)
                self.logger.debug('resp: %s', resp)
                await rep_sock.send_json(resp)
        rep_sock.close()

    def stop(self):
        """Stop the request loop and close the PUB socket."""
        self.logger.info('stopping zmq server ...')
        self.running = False
        self.pub_sock.close()

    async def handle_req(self, msg):
        """Dispatch *msg* to its registered handler; always return a JSON-able dict.

        The remaining keys of *msg* are passed to the handler as kwargs.
        """
        # BUG FIX: pop() instead of get()+del — a request without an
        # 'action' key used to raise KeyError on the del.
        action = msg.pop('action', '')
        self.__class__.transaction += 1
        seq = self.__class__.transaction
        if action in self.handlers:
            self.logger.info('handle(seq: %d) %s, with %s', seq, action, msg)
            try:
                resp = await self.handlers[action](**msg)
                self.logger.info('handle(seq: %d) %s, return %s', seq, action,
                                 resp)
                return resp
            except RuntimeError as err:
                self.logger.error('error while handle(seq: %d) %s:\n%s', seq,
                                  action, traceback.format_exc())
                return dict(error=1, message=str(err))
            except Exception as ex:
                self.logger.error(type(ex))
                # BUG FIX: this branch previously fell through and returned
                # None, so the caller replied with JSON null.
                return dict(error=1, message=str(ex))
        else:
            self.logger.error('register with action: %s not exist', action)
            # BUG FIX: typo 'Invalud' -> 'Invalid' in the client-visible message.
            return dict(error=1, message='Invalid action: {}'.format(action))

    def register_callback(self, action, func):
        """Register async *func* as the handler for *action*."""
        self.handlers[action] = func

    async def notify_service_event(self, service, data):
        """Publish *data* as JSON on the *service* topic."""
        await self.pub_sock.send_string(service, flags=SNDMORE)
        self.logger.debug('publish: %s', data)
        await self.pub_sock.send_json(data)
コード例 #6
0
def main(context: Context):
    """Wire up pygame rendering, joystick input and ZMQ queues, then run forever.

    Blocks in loop.run_forever() until interrupted; cleanup happens in the
    finally block.
    """
    config_manager = ConfigurationManager()
    config = config_manager.config
    rcs = RCSnail()
    rcs.sign_in_with_email_and_password(os.getenv('RCS_USERNAME', ''),
                                        os.getenv('RCS_PASSWORD', ''))

    loop = asyncio.get_event_loop()

    # PUB socket: outbound data stream.
    data_queue = context.socket(zmq.PUB)
    loop.run_until_complete(
        initialize_publisher(data_queue, config.data_queue_port))

    # SUB socket: inbound control commands.
    controls_queue = context.socket(zmq.SUB)
    loop.run_until_complete(
        initialize_subscriber(controls_queue, config.controls_queue_port))

    pygame_event_queue = asyncio.Queue()
    pygame.init()
    pygame.display.set_caption("RCSnail Connector")

    screen = pygame.display.set_mode(
        (config.window_width, config.window_height))
    interceptor = Interceptor(config, data_queue, controls_queue)
    car = JoystickCar(config,
                      send_car_state=interceptor.send_car_state,
                      recv_car_controls=interceptor.recv_car_controls)
    renderer = JoystickRenderer(config, screen, car)
    renderer.init_controllers()
    interceptor.set_renderer(renderer)

    # The pygame event pump runs in an executor thread (pygame blocks);
    # everything else runs as asyncio tasks on this loop.
    pygame_task = loop.run_in_executor(None, renderer.pygame_event_loop, loop,
                                       pygame_event_queue)
    render_task = asyncio.ensure_future(renderer.render(rcs))
    event_task = asyncio.ensure_future(
        renderer.register_pygame_events(pygame_event_queue))
    queue_task = asyncio.ensure_future(
        rcs.enqueue(loop,
                    interceptor.new_frame,
                    interceptor.new_telemetry,
                    track=config.track,
                    car=config.car))

    try:
        loop.run_forever()
    except KeyboardInterrupt:
        print("Closing due to keyboard interrupt.")
    finally:
        queue_task.cancel()
        pygame_task.cancel()
        render_task.cancel()
        event_task.cancel()
        pygame.quit()
        # NOTE(review): the loop has already stopped here, so this future is
        # scheduled but may never actually run — confirm intended.
        asyncio.ensure_future(rcs.close_client_session())
コード例 #7
0
ファイル: spqueue.py プロジェクト: lw000/zmq_guide
def run_queue():
    """LRU queue broker: route client requests to READY workers.

    Generator-based (pre-``async def``) coroutine using ``yield from``.
    Workers announce themselves with an LRU_READY frame on the backend;
    each client request is forwarded to the least-recently-used worker.
    """
    context = Context(1)

    frontend = context.socket(zmq.ROUTER)    # ROUTER
    backend = context.socket(zmq.ROUTER)     # ROUTER
    frontend.bind("tcp://*:5555")            # For clients
    backend.bind("tcp://*:5556")             # For workers

    # Poll only the backend until at least one worker is available,
    # otherwise client requests would have nowhere to go.
    poll_workers = Poller()
    poll_workers.register(backend, zmq.POLLIN)

    poll_both = Poller()
    poll_both.register(frontend, zmq.POLLIN)
    poll_both.register(backend, zmq.POLLIN)

    workers = []  # queue of idle worker addresses (FIFO = LRU order)

    while True:
        if workers:
            socks = yield from poll_both.poll()
        else:
            socks = yield from poll_workers.poll()
        socks = dict(socks)

        # Handle worker activity on backend
        if socks.get(backend) == zmq.POLLIN:
            # Use worker address for LRU routing
            msg = yield from backend.recv_multipart()
            if not msg:
                break
            print('I: received msg: {}'.format(msg))
            address = msg[0]
            workers.append(address)

            # Everything after the second (delimiter) frame is reply
            reply = msg[2:]

            # Forward message to client if it's not a READY
            if reply[0] != LRU_READY:
                print('I: sending -- reply: {}'.format(reply))
                yield from frontend.send_multipart(reply)
            else:
                print('I: received ready -- address: {}'.format(address))

        if socks.get(frontend) == zmq.POLLIN:
            # Get client request, route to first available worker
            msg = yield from frontend.recv_multipart()
            worker = workers.pop(0)
            request = [worker, b''] + msg
            print('I: sending -- worker: {}  msg: {}'.format(worker, msg))
            yield from backend.send_multipart(request)
コード例 #8
0
async def app(store: StorageServerStore, context: Context, name: str,
              command: str, arg: str):
    """Storage client entry point.

    command: 'declare' (share file *arg* and serve reads forever),
    'disown' (stop sharing *arg*) or 'show' (download and print *arg*).
    """
    print("Starting...")
    dirserv_commands = context.socket(zmq.REQ)
    dirserv_commands.connect("tcp://127.0.0.1:5350")
    print("App is started")
    if command == "declare":
        # Ask the directory server which address it sees us on (5s timeout).
        self_addr = await asyncio.wait_for(ping(dirserv_commands, name), 5)
        print("Directory server report this client is run on {}".format(
            self_addr))
        command_port: Socket = context.socket(zmq.ROUTER)
        port = command_port.bind_to_random_port("tcp://127.0.0.1")
        self_entrypoint_addr = "tcp://{}:{}".format(self_addr, port)
        await asyncio.wait_for(
            cast_address(dirserv_commands, name, self_entrypoint_addr), 5)
        print("Address {} casted on directory server".format(
            self_entrypoint_addr))
        with open(arg, mode='rb') as f:
            store.files[arg] = VirtualFile(arg, f.read(), [])
        await declare_file(dirserv_commands, arg, name)
        print("File {} is declared, serving file...".format(arg))
        poller = Poller()
        poller.register(command_port, zmq.POLLIN)
        # Serve read requests forever; 'declare' mode never returns.
        while True:
            events: List[Tuple[Socket, int]] = await poller.poll()
            for socket, mark in events:
                # ROUTER frames: [identity, empty delimiter, command, ...]
                frames: List[Frame] = await socket.recv_multipart(copy=False)
                id_frame = frames.pop(0)
                frames.pop(0)  # discard the empty delimiter frame
                command_frame = frames.pop(0)
                # NOTE(review): rebinds the `command` parameter; harmless
                # here since this branch never re-reads the original value.
                command = str(command_frame.bytes, 'utf8')
                if socket == command_port:
                    if command == 'fs.read_file':
                        await read_file_handler(store, frames, socket,
                                                id_frame)
    elif command == "disown":
        await disown_file(dirserv_commands, arg, name)
        context.destroy()
        return
    elif command == "show":
        content = await download_file(context, dirserv_commands, arg)
        print("==== Content of '{}' ====".format(arg))
        print(str(content, 'utf8'))
        context.destroy()
        return
    else:
        print("Unknown command {}".format(command))
        context.destroy()
        return
コード例 #9
0
ファイル: spworker.py プロジェクト: lw000/zmq_guide
def run_worker():
    """LRU worker: announce READY, then echo requests until simulated failure.

    Generator-based (pre-``async def``) coroutine using ``yield from``.
    """
    context = Context(1)
    worker = context.socket(zmq.REQ)

    # Random printable identity so the broker can route replies to us.
    identity = "%04X-%04X" % (randint(0, 0x10000), randint(0, 0x10000))
    worker.setsockopt_string(zmq.IDENTITY, identity)
    worker.connect("tcp://localhost:5556")

    print("I: (%s) worker ready" % identity)
    yield from worker.send_string(LRU_READY)

    cycles = 0
    while True:
        msg = yield from worker.recv_multipart()
        if not msg:
            break

        cycles += 1
        # After a warm-up period, randomly simulate a crash (exit) ...
        if cycles > 3 and randint(0, 5) == 0:
            print("I: (%s) simulating a crash" % identity)
            break
        # ... or a CPU overload (stall for a few seconds).
        elif cycles > 3 and randint(0, 5) == 0:
            print("I: (%s) simulating CPU overload" % identity)
            yield from asyncio.sleep(3)
        print("I: (%s) normal reply" % identity)
        # Do some heavy work
        yield from asyncio.sleep(1)
        yield from worker.send_multipart(msg)
コード例 #10
0
ファイル: client.py プロジェクト: matrixji/eha
class Client:
    """ZeroMQ client: REQ socket for commands, SUB socket for event delivery."""

    def __init__(self, name):
        self.name = name
        self.logger = getLogger(__name__)
        self.context = Context()
        self.req_sock = self.context.socket(REQ)
        self.sub_sock = self.context.socket(SUB)
        self.connect_req_sock()
        self.uuid = str(uuid4())

    def connect_req_sock(self):
        """Connect the REQ socket to the local command server."""
        self.logger.debug('client start to connect ...')
        self.req_sock.connect("tcp://127.0.0.1:40085")
        self.logger.debug('client connected')

    async def register(self, active_count=1):
        """Register this client; raise RuntimeError on a non-zero error reply."""
        await self.req_sock.send_json({
            'action': 'register',
            'name': self.name,
            'uuid': self.uuid,
            'active_count': active_count,
        })
        resp = await self.req_sock.recv_json()
        if not resp or resp.get('error') != 0:
            raise RuntimeError(str(resp))

    async def keepalive(self):
        """Send a keepalive; raise RuntimeError if the server reports an error."""
        await self.req_sock.send_json({
            'action': 'keepalive',
            'name': self.name,
            'uuid': self.uuid,
        })
        self.logger.debug('waiting keepalive resp ...')
        resp = await self.req_sock.recv_json()
        self.logger.debug('resp for keepalive: %s', resp)
        if not resp or resp.get('error') != 0:
            raise RuntimeError(str(resp))

    async def unregister(self):
        """Tell the server to forget this client; the reply body is ignored."""
        await self.req_sock.send_json({
            'action': 'unregister',
            'name': self.name,
            'uuid': self.uuid,
        })
        await self.req_sock.recv_json()

    def subscribe(self, topic=None):
        """Subscribe the SUB socket to *topic* (defaults to the client name)."""
        topic = topic or self.name
        self.sub_sock.connect("tcp://127.0.0.1:40086")
        self.sub_sock.setsockopt_string(SUBSCRIBE, topic)
        self.logger.info('subscribe with topic: %s', topic)

    async def fetch_event(self):
        """Receive one [topic, payload] event and return the decoded payload."""
        _, msg = await self.sub_sock.recv_multipart()
        self.logger.info('event: %s', msg)
        event = json.loads(msg.decode('utf-8'))
        self.logger.info('event: %s', event)
        return event
コード例 #11
0
ファイル: rolspy.py プロジェクト: wwj718/ROLS
 def __init__(self):
     """Create a loopback PUB/SUB pair on tcp://127.0.0.1:2000."""
     context = Context()
     self.pub = context.socket(zmq.PUB)
     self.sub = context.socket(zmq.SUB)
     self.pub.bind('tcp://127.0.0.1:2000')
     self.sub.connect('tcp://127.0.0.1:2000')
     time.sleep(0.5) # wait briefly so the connection is fully established
コード例 #12
0
class async_zmq_streaming_subscriber(object):
    """Async ZeroMQ SUB endpoint: binds a TCP port and prints every message."""

    def __init__(self, zmp_subscribing_port: int):
        super(async_zmq_streaming_subscriber, self).__init__()
        self._port = zmp_subscribing_port
        self.zmq_context = Context()
        self.zmq_bingding_socket = self.zmq_context.socket(SUB)

        self.zmq_bingding_socket.setsockopt(TCP_KEEPALIVE, 1)  # enable TCP keepalive
        self.zmq_bingding_socket.setsockopt(TCP_KEEPALIVE_CNT,
                                            5)  # reconnect after 5 unanswered probes
        self.zmq_bingding_socket.setsockopt(TCP_KEEPALIVE_IDLE,
                                            60)  # start probing after 60s idle
        self.zmq_bingding_socket.setsockopt(TCP_KEEPALIVE_INTVL, 3)  # probe every 3s
        self.zmq_bingding_socket.setsockopt_string(SUBSCRIBE, "")  # required: subscribe to all topics

        # NOTE: unusual but valid — the SUB side binds, publishers connect.
        zmq_sub_address = f"tcp://*:{zmp_subscribing_port}"
        self.zmq_bingding_socket.bind(zmq_sub_address)
        pass

    async def loop_runner(self):
        """Receive and print messages forever."""

        print(f"zmq端口{self._port}开始sub监听")

        while True:
            msg = await self.zmq_bingding_socket.recv()
            print(msg)  # show each received message

    def run(self):
        """Run the receive loop to completion (blocks forever)."""
        run(self.loop_runner())
コード例 #13
0
async def download_file(context: Context, dirserv_sock: Socket,
                        filename: str) -> bytes:
    """Download *filename* from one device that declared it.

    Returns the file content, or None when the serving device reports a
    non-zero status byte in the first reply frame.
    Raises asyncio.TimeoutError if the device does not reply within 5s.
    """
    devices = await get_file_declared_devices(dirserv_sock, filename)
    all_declared_addresses = []
    for dev_name in devices:
        addresses = await get_devices_declared_addresses(
            dirserv_sock, dev_name)
        all_declared_addresses += addresses
    # We use only one connection to 'download' files here, but a complete
    # implementation must download them from different devices to speed up
    # the process.
    used_address = choice(all_declared_addresses)
    print("download_file(): using address {}".format(used_address))
    download_sock: Socket = context.socket(zmq.REQ)
    try:
        download_sock.connect(used_address)
        await download_sock.send_multipart(
            [b"fs.read_file", bytes(filename, 'utf8')])
        # This is just a sample protocol, and it does not need complex
        # functions to deal with big contents.
        frames: List[bytes] = await asyncio.wait_for(
            download_sock.recv_multipart(), 5)
    finally:
        # BUG FIX: close in finally — previously a recv timeout raised out
        # of the function and leaked the socket.
        download_sock.close()
    if frames[0][0] == 0:
        return frames.pop(1)
    else:
        return None
コード例 #14
0
ファイル: server.py プロジェクト: davebshow/aiogoblin
class RPC(WSRPCHandler):
    """Application RPC. RPC methods should start with the `rpc_` prefix"""

    def __init__(self, loop=None):
        if loop is None:
            loop = asyncio.get_event_loop()
        self._loop = loop
        self._context = Context()

    async def rpc_echo(self, ws, method, blob):
        """Echo *blob* straight back over the websocket."""
        ws.send_bytes(blob)

    async def rpc_echo_worker(self, ws, method, blob):
        """Round-trip *blob* through the echo worker via a DEALER socket."""
        socket = self._context.socket(zmq.DEALER)
        socket.connect('tcp://localhost:5559')
        await socket.send_multipart([b'', blob])

        message = await socket.recv_multipart()
        assert message[-1] == blob, '%s does not equal %s' % (
            message[-1], blob)
        ws.send_bytes(message[-1])

        # Echo worker streams `closing` after echoing
        message = await socket.recv_multipart()
        # BUG FIX: this referenced undefined `message1`, so a failing
        # assertion raised NameError instead of AssertionError.
        assert message[-1] == b'closing', '%s does not equal %s' % (
            message[-1], 'closing')
        ws.send_bytes(message[-1])
コード例 #15
0
async def main(context: Context):
    """Relay loop: subscribe to frames/telemetry, record, publish controls."""
    config_manager = ConfigurationManager()
    conf = config_manager.config
    # transformer = Transformer(conf)
    recorder = Recorder(conf)

    data_queue = context.socket(zmq.SUB)
    controls_queue = context.socket(zmq.PUB)

    control_mode = conf.control_mode
    dagger_training_enabled = conf.dagger_training_enabled
    dagger_epoch_size = conf.dagger_epoch_size

    try:
        mem_slice_frames = []
        mem_slice_numerics = []
        data_count = 0

        await initialize_subscriber(data_queue, conf.data_queue_port)
        await initialize_publisher(controls_queue, conf.controls_queue_port)

        while True:
            frame, data = await recv_array_with_json(queue=data_queue)
            telemetry, expert_action = data
            if frame is None or telemetry is None or expert_action is None:
                logging.info("None data")
                continue

            try:
                next_controls = expert_action.copy()
                time.sleep(0.01)

                recorder.record_full(frame, telemetry, expert_action, next_controls)
                # BUG FIX: send_json on an asyncio zmq socket returns a
                # coroutine; without await the controls were never sent.
                await controls_queue.send_json(next_controls)
            except Exception as ex:
                print("Sending exception: {}".format(ex))
                traceback.print_tb(ex.__traceback__)
    except Exception as ex:
        print("Exception: {}".format(ex))
        traceback.print_tb(ex.__traceback__)
    finally:
        data_queue.close()
        controls_queue.close()

        if recorder is not None:
            recorder.save_session_with_expert()
コード例 #16
0
class Server:
    """Echo server: a REP socket feeds an internal queue drained by a PUB socket."""

    def __init__(self, stop_on_loop):
        self.stop_on_loop = stop_on_loop  # for integration tests
        self.messages_queue = asyncio.Queue()
        self.context = Context()
        self.sleep_sec = 1  # artificial pacing delay per step
        self.loops = 0  # completed run_one_loop() iterations

        self.receiver_socket = self.context.socket(zmq.REP)
        self.receiver_socket.bind("tcp://127.0.0.1:8000")

        self.pub_socket = self.context.socket(zmq.PUB)
        self.pub_socket.bind("tcp://*:8001")

    async def receive_message(self):
        """Receive one request, queue it for publishing, then echo a reply."""
        received_message = await self.receiver_socket.recv()
        self.messages_queue.put_nowait(received_message)
        await asyncio.sleep(self.sleep_sec)
        await self.receiver_socket.send_string(
            "Echo reply from the server: %s" % received_message)

    async def publish_message(self):
        """Publish one queued message on the PUB socket."""
        message_to_publish = await self.messages_queue.get()
        await self.pub_socket.send(message_to_publish)
        self.messages_queue.task_done()
        await asyncio.sleep(self.sleep_sec)

    async def wait_until_all_messages_published(self):
        """Block until every queued message has been marked done."""
        await self.messages_queue.join()

    async def run_one_loop(self):
        """Run one receive/publish/drain round concurrently.

        return_exceptions=True means failures are collected, not raised.
        """
        self.loops += 1
        await asyncio.gather(self.receive_message(),
                             self.publish_message(),
                             self.wait_until_all_messages_published(),
                             return_exceptions=True)

    async def run(self):
        """Loop forever, or until `stop_on_loop` iterations when testing."""
        while True:
            await self.run_one_loop()
            print('running server loop #no: ', self.loops)
            if self.stop_on_loop and self.stop_on_loop == self.loops:
                self.receiver_socket.close()
                self.pub_socket.close()
                break
コード例 #17
0
ファイル: client.py プロジェクト: aohan237/simrpc
 def get_socket(self):
     """Create a REQ socket connected to the server and register it for polling."""
     context = Context() if self.is_async else zmq.Context()
     socket = context.socket(zmq.REQ)
     socket.connect(self.server_address)
     self.poll.register(socket, zmq.POLLIN)
     return socket
コード例 #18
0
ファイル: sender_pub_notlogger.py プロジェクト: cjrh/venus
async def main():
    """Bind a PUB socket and run the sender until it finishes."""
    context = Context()
    pub = context.socket(zmq.PUB)
    pub.bind('tcp://127.0.0.1:12345')
    try:
        await sender(pub)
    finally:
        pub.close(1)
        context.destroy()
コード例 #19
0
async def main():
    """Connect a SUB socket (all topics) and run the receiver until it finishes."""
    context = Context()
    sub = context.socket(zmq.SUB)
    sub.connect('tcp://127.0.0.1:12345')
    sub.subscribe(b'')
    try:
        await receiver(sub)
    finally:
        sub.close(1)
        context.destroy()
コード例 #20
0
async def storage_server(store: StorageServerStore, context: Context,
                         name: str):
    """Long-running storage node: serve file reads, track file-change events."""
    print("Starting...")
    dirserv_commands = context.socket(zmq.REQ)
    dirserv_commands.connect("tcp://127.0.0.1:5350")
    # Ask the directory server which address it sees us on (5s timeout).
    self_addr = await asyncio.wait_for(ping(dirserv_commands, name), 5)
    print("Directory server report this client is run on {}".format(self_addr))
    self_entrypoint_addr = "tcp://{}:{}".format(self_addr, 5354)
    command_port = context.socket(zmq.ROUTER)
    command_port.bind("tcp://127.0.0.1:5354")
    await asyncio.wait_for(
        cast_address(dirserv_commands, name, self_entrypoint_addr), 5)
    print("Address {} casted on directory server".format(self_entrypoint_addr))
    # Subscribe only to 'fs'-prefixed file-change broadcasts.
    file_changes_sub = context.socket(zmq.SUB)
    file_changes_sub.connect("tcp://127.0.0.1:5351")
    file_changes_sub.setsockopt(zmq.SUBSCRIBE, b"fs")
    poller = Poller()
    poller.register(file_changes_sub, zmq.POLLIN)
    poller.register(command_port, zmq.POLLIN)
    print("Storage server is started")
    while True:
        events: List[Tuple[Socket, int]] = await poller.poll()
        for socket, mark in events:
            frames: List[Frame] = await socket.recv_multipart(copy=False)
            if socket == command_port:
                # ROUTER frames: [identity, empty delimiter, command, ...]
                id_frame = frames.pop(0)
                frames.pop(0)  # discard the empty delimiter frame
                command_frame = frames.pop(0)
                command = str(command_frame.bytes, 'utf8')
                if command == 'fs.read_file':
                    await read_file_handler(store, frames, socket, id_frame)
            elif socket == file_changes_sub:
                command_frame = frames.pop(0)
                command = str(command_frame.bytes, 'utf8')
                print("File change received: {}".format(command))
                if command == 'fs.delete_file':
                    await delete_file_event_callback(store, frames,
                                                     dirserv_commands, name)
                elif command == 'fs.new_file':
                    await new_file_event_callback(store, frames,
                                                  dirserv_commands, context,
                                                  name)
コード例 #21
0
async def zmain():
    """Connect a DEALER socket with the configured identity and process messages."""
    logger.info(123)
    ctx = Context()
    logger.info(456)
    dealer: Socket = ctx.socket(zmq.DEALER)
    logger.info(789)
    node_identity = settings.IDENTITY
    logger.info(node_identity)
    dealer.identity = node_identity.encode()
    logger.info(345)
    endpoint = f'tcp://{settings.TARGET_SERVER_URL()}:{settings.TARGET_SERVER_PORT():d}'
    logger.info(f'Connecting to {endpoint}')
    dealer.connect(endpoint)
    try:
        await process_messages(dealer)
    except asyncio.CancelledError:
        # Swallow cancellation so cleanup below always runs quietly.
        pass
    finally:
        dealer.close(1)
        ctx.destroy()
コード例 #22
0
class ZMQScaffolding:
    """Holds PUB/SUB endpoint configuration; connect() builds the sockets.

    The SUB socket binds and the PUB socket connects, so this object acts
    as the stable endpoint that peers attach to.
    """

    def __init__(self, base_url='127.0.0.1', subscriber_port='9999', publisher_port='9998', filters=(b'', )):
        self.base_url = base_url
        self.subscriber_port = subscriber_port
        self.publisher_port = publisher_port
        self.subscriber_url = 'tcp://{}:{}'.format(self.base_url, self.subscriber_port)
        self.publisher_url = 'tcp://{}:{}'.format(self.base_url, self.publisher_port)

        # Topic prefixes to subscribe to; the default (b'',) means everything.
        self.filters = filters

    def connect(self):
        """Create the ZMQ context and wire up the SUB (bind) / PUB (connect) pair."""
        self.context = Context()

        self.sub_socket = self.context.socket(socket_type=zmq.SUB)
        self.pub_socket = self.context.socket(socket_type=zmq.PUB)
        self.pub_socket.connect(self.publisher_url)

        self.sub_socket.bind(self.subscriber_url)

        # Renamed loop variable from `filter`, which shadowed the builtin.
        for topic_filter in self.filters:
            self.sub_socket.subscribe(topic_filter)
コード例 #23
0
    async def async_tasks(self):
        """Worker loop: serve requests on a REP socket, optionally monitoring.

        Each request is passed to async_dispatch; on failure an empty reply
        is sent instead of propagating the exception.
        """
        context = Context()
        socket = context.socket(zmq.REP)
        socket.connect(self.worker_address)
        loop = asyncio.get_event_loop()
        # copy data to thread local
        self.data_local.data = deepcopy(self.data)

        if self.need_monitor:
            my_monitor = self.monitor_cls()
            await my_monitor.prepare()
        while True:
            #  Wait for next request from client
            message = await socket.recv()
            try:
                res_message = await self.async_dispatch(message)
            except Exception as tmp:
                # NOTE(review): falls back to a str "" while zmq send()
                # normally expects bytes — confirm this path is intended.
                res_message = ""
                logger.exception(tmp)
            await socket.send(res_message)
            if self.need_monitor:
                # Fire-and-forget: record the request/reply pair.
                data = {'req': message, 'rep': res_message}
                loop.create_task(my_monitor.receive(data))
コード例 #24
0
ファイル: lpserver.py プロジェクト: lw000/zmq_guide
def run_server():
    """Lazy-pirate demo server: echo requests, randomly simulating failures.

    Generator-based (pre-``async def``) coroutine using ``yield from``.
    Returns (context, server) so the caller can clean up after a
    simulated crash.
    """
    context = Context()
    server = context.socket(zmq.REP)
    server.bind(SERVER_ADDR)
    cycles = 0
    while True:
        request = yield from server.recv()
        cycles += 1
        # Simulate various problems, after a few cycles
        if cycles > 3 and randint(0, 3) == 0:
            print("I: Simulating a crash")
            server.unbind(SERVER_ADDR)
            # Delay for a bit, else we get "Address already in use" error.
            # Note that to really simulate a crash, we should probably kill
            # this process and start another.
            yield from asyncio.sleep(2)
            break
        elif cycles > 3 and randint(0, 3) == 0:
            print("I: Simulating CPU overload")
            yield from asyncio.sleep(2)
        print("I: Normal request (%s)" % request)
        yield from asyncio.sleep(1)       # Do some heavy work
        yield from server.send(request)
    return (context, server)
コード例 #25
0
ファイル: lbbroker2.py プロジェクト: Andy-hpliu/zguide
def run_broker(loop):
    """Load-balancing broker: route client requests to least-recently-used workers.

    Generator-based (pre-``async def``) coroutine using ``yield from``.
    Spawns NBR_WORKERS worker tasks and NBR_CLIENTS client tasks over
    inproc transports, then runs the LRU routing loop until all expected
    client replies have been delivered. Stops *loop* before returning.
    """
    print('(run_broker) starting')
    url_worker = "inproc://workers"
    url_client = "inproc://clients"
    # Each client sends 3 requests; exit once all replies are delivered.
    client_nbr = NBR_CLIENTS * 3
    # Prepare our context and sockets
    context = Context()
    frontend = context.socket(zmq.ROUTER)
    frontend.bind(url_client)
    backend = context.socket(zmq.ROUTER)
    backend.bind(url_worker)
    print('(run_broker) creating workers and clients')
    # create workers and clients threads
    worker_tasks = []
    for idx in range(NBR_WORKERS):
        task = asyncio.ensure_future(run_worker(url_worker, context, idx))
        worker_tasks.append(task)
    client_tasks = []
    for idx in range(NBR_CLIENTS):
        task = asyncio.ensure_future(run_client(url_client, context, idx))
        client_tasks.append(task)
    print('(run_broker) after creating workers and clients')
    # Logic of LRU loop
    # - Poll backend always, frontend only if 1+ worker ready
    # - If worker replies, queue worker as ready and forward reply
    # to client if necessary
    # - If client requests, pop next worker and send request to it
    # Queue of available workers
    available_workers = 0
    workers_list = []
    # init poller
    poller = Poller()
    # Always poll for worker activity on backend
    poller.register(backend, zmq.POLLIN)
    # Poll front-end only if we have available workers
    poller.register(frontend, zmq.POLLIN)
    while True:
        socks = yield from poller.poll()
        socks = dict(socks)
        # Handle worker activity on backend
        if (backend in socks and socks[backend] == zmq.POLLIN):
            # Queue worker address for LRU routing
            message = yield from backend.recv_multipart()
            assert available_workers < NBR_WORKERS
            worker_addr = message[0]
            # add worker back to the list of workers
            available_workers += 1
            workers_list.append(worker_addr)
            #   Second frame is empty
            empty = message[1]
            assert empty == b""
            # Third frame is READY or else a client reply address
            client_addr = message[2]
            # If client reply, send rest back to frontend
            if client_addr != b'READY':
                # Following frame is empty
                empty = message[3]
                assert empty == b""
                reply = message[4]
                yield from frontend.send_multipart([client_addr, b"", reply])
                printdbg('(run_broker) to frontend -- reply: "{}"'.format(
                    reply))
                client_nbr -= 1
                if client_nbr == 0:
                    printdbg('(run_broker) exiting')
                    break   # Exit after N messages
        # poll on frontend only if workers are available
        if available_workers > 0:
            if (frontend in socks and socks[frontend] == zmq.POLLIN):
                # Now get next client request, route to LRU worker
                # Client request is [address][empty][request]
                response = yield from frontend.recv_multipart()
                [client_addr, empty, request] = response
                assert empty == b""
                #  Dequeue and drop the next worker address
                available_workers += -1
                worker_id = workers_list.pop()
                yield from backend.send_multipart(
                    [worker_id, b"", client_addr, b"", request])
                printdbg('(run_broker) to backend -- request: "{}"'.format(
                    request))
    #out of infinite loop: do some housekeeping
    printdbg('(run_broker) finished')
    for worker_task in worker_tasks:
        worker_task.cancel()
    printdbg('(run_broker) workers cancelled')
    yield from asyncio.sleep(1)
    frontend.close()
    backend.close()
    #context.term()     # Caution: calling term() blocks.
    loop.stop()
    printdbg('(run_broker) returning')
    return 'finished ok'
コード例 #26
0
class NodeCommunicator(object):
    """Asynchronous point-to-point messaging layer between ``n`` peers over ZeroMQ.

    Each node binds one ROUTER socket to receive from all peers and opens one
    DEALER socket per remote peer to send.  Outgoing messages are staged in
    per-peer asyncio queues; messages addressed to *this* node bypass ZeroMQ
    entirely because the local sender queue *is* the receiver queue.
    Intended to be used as an async context manager (``async with``).
    """

    # Sentinel pushed onto every remote sender queue at shutdown so each
    # dealer task drains its queue and exits (compared with ``is`` below).
    LAST_MSG = None

    def __init__(self, peers_config, my_id, linger_timeout):
        """Set up queues and the shared ZMQ context.

        ``peers_config`` is indexable by node id and each entry exposes
        ``ip`` and ``port``; ``linger_timeout`` is in seconds (converted to
        milliseconds for ``Context.destroy``).  No sockets are opened here —
        that happens in ``_setup`` via ``__aenter__``.
        """
        self.peers_config = peers_config
        self.my_id = my_id

        self.bytes_sent = 0
        self.benchmark_logger = logging.LoggerAdapter(
            logging.getLogger("benchmark_logger"), {"node_id": my_id}
        )

        self._dealer_tasks = []
        self._router_task = None
        self.linger_timeout = linger_timeout
        self.zmq_context = Context(io_threads=cpu_count())

        n = len(peers_config)
        self._receiver_queue = asyncio.Queue()
        self._sender_queues = [None] * n
        for i in range(n):
            if i == self.my_id:
                # Loopback: sending to myself feeds my own receiver queue.
                self._sender_queues[i] = self._receiver_queue
            else:
                self._sender_queues[i] = asyncio.Queue()

    def send(self, node_id, msg):
        """Enqueue ``msg`` for delivery to ``node_id`` (non-blocking).

        Loopback messages skip ZeroMQ, so they are tagged with the sender id
        here to mirror the ``(sender_id, msg)`` tuples built in ``_recv_loop``.
        """
        msg = (self.my_id, msg) if node_id == self.my_id else msg
        self._sender_queues[node_id].put_nowait(msg)

    async def recv(self):
        """Await the next ``(sender_id, msg)`` tuple received by this node."""
        return await self._receiver_queue.get()

    async def __aenter__(self):
        await self._setup()
        return self

    async def __aexit__(self, exc_type, exc, tb):
        # Add None to the sender queues and drain out all the messages.
        # Order matters: let the dealer tasks flush everything queued before
        # cancelling the router and tearing the context down.
        for i in range(len(self._sender_queues)):
            if i != self.my_id:
                self._sender_queues[i].put_nowait(NodeCommunicator.LAST_MSG)
        await asyncio.gather(*self._dealer_tasks)
        logging.debug("Dealer tasks finished.")
        self._router_task.cancel()
        logging.debug("Router task cancelled.")
        # linger_timeout is seconds; destroy() expects milliseconds.
        self.zmq_context.destroy(linger=self.linger_timeout * 1000)
        self.benchmark_logger.info("Total bytes sent out: %d", self.bytes_sent)

    async def _setup(self):
        """Bind the ROUTER socket and connect one DEALER per remote peer."""
        # Setup one router for a party, this acts as a
        # server for receiving messages from other parties.
        router = self.zmq_context.socket(ROUTER)
        router.bind(f"tcp://*:{self.peers_config[self.my_id].port}")
        # Start a task to receive messages on this node.
        self._router_task = asyncio.create_task(self._recv_loop(router))
        self._router_task.add_done_callback(print_exception_callback)

        # Setup one dealer per receving party. This is used
        # as a client to send messages to other parties.
        for i in range(len(self.peers_config)):
            if i != self.my_id:
                dealer = self.zmq_context.socket(DEALER)
                # This identity is sent with each message. Setting it to my_id, this is
                # used to appropriately route the message. This is not a good idea since
                # a node can pretend to send messages on behalf of other nodes.
                dealer.setsockopt(IDENTITY, str(self.my_id).encode())
                dealer.connect(
                    f"tcp://{self.peers_config[i].ip}:{self.peers_config[i].port}"
                )
                # Setup a task which reads messages intended for this
                # party from a queue and then sends them to this node.
                task = asyncio.create_task(
                    self._process_node_messages(
                        i, self._sender_queues[i], dealer.send_multipart
                    )
                )
                self._dealer_tasks.append(task)

    async def _recv_loop(self, router):
        """Forever pump (sender_id, deserialized msg) into the receiver queue.

        ``loads``/``dumps`` come from module scope — presumably pickle or a
        project serializer; TODO confirm against the file's imports.
        """
        while True:
            sender_id, raw_msg = await router.recv_multipart()
            msg = loads(raw_msg)
            # logging.debug("[RECV] FROM: %s, MSG: %s,", sender_id, msg)
            self._receiver_queue.put_nowait((int(sender_id), msg))

    async def _process_node_messages(self, node_id, node_msg_queue, send_to_node):
        """Drain ``node_msg_queue`` and push each message out over the dealer.

        Exits when the LAST_MSG sentinel is seen; also accounts the number of
        serialized bytes sent for the benchmark log.
        """
        while True:
            msg = await node_msg_queue.get()
            if msg is NodeCommunicator.LAST_MSG:
                logging.debug("No more messages to Node: %d can be sent.", node_id)
                break
            raw_msg = dumps(msg)
            self.bytes_sent += len(raw_msg)
            # logging.debug("[SEND] TO: %d, MSG: %s", node_id, msg)
            await send_to_node([raw_msg])
コード例 #27
0
ファイル: controller.py プロジェクト: DrawML/research-dist
async def _report_task_result(context : Context, task_info : TaskInformation):
    """Open a DEALER socket connected to the task's result receiver.

    NOTE(review): as captured here the socket is only created and connected —
    it is neither used nor closed, so it leaks; presumably the code that sends
    the result follows in the original file. TODO confirm against the source.
    """

    sock = context.socket(zmq.DEALER)
    sock.connect(task_info.result_receiver_address.to_zeromq_addr())
コード例 #28
0
ファイル: worker.py プロジェクト: DrawML/research-dist
def _worker_main(id, slave_addr, task):
    """Worker-process entry point.

    Connects a DEALER socket to the slave at ``slave_addr``, announces
    ``TaskStart``, runs ``task`` (only SleepTask is supported) and reports
    ``TaskFinish`` with the serialized result.

    ``id`` (kept for interface compatibility although it shadows the builtin)
    is the worker identity prefixed to every outgoing frame.
    """

    print("_worker_main")

    import zmq
    from zmq.asyncio import Context, ZMQEventLoop
    import asyncio
    from ..common.task import SleepTaskResult
    from .task import SleepTask

    def _resolve_msg(msg):
        """Split an incoming multipart message into (header, body).

        Expected frames: [header, empty delimiter, body].
        """
        print(msg)
        header = msg[0]
        assert msg[1] == b""
        body = msg[2]

        return header, body

    def _dispatch_msg(header, body=b""):
        """Fire-and-forget a [id, '', header, '', body] multipart message."""
        async def _send(msg):
            await socket.send_multipart(msg)

        msg = [id.encode(encoding='utf-8'), b'', header, b'', body]
        asyncio.ensure_future(_send(msg))

    def _process_sleep_task(task):
        """Schedule the sleep job; report TaskFinish when it wakes up."""
        async def __process_sleep_task(task):
            await asyncio.sleep(task.job.seconds)
            task.result = SleepTaskResult("Sleep " + str(task.job.seconds) + "By " + id)
            _dispatch_msg(b"TaskFinish", task.result.to_bytes())

        asyncio.ensure_future(__process_sleep_task(task))

    async def _run_worker():
        """Announce the task, start it, then wait for one control message."""
        _dispatch_msg(b"TaskStart")
        if isinstance(task, SleepTask):
            _process_sleep_task(task)
        else:
            raise ValueError("Invalid Task Type.")

        while True:
            msg = await socket.recv_multipart()
            header, body = _resolve_msg(msg)
            # some codes will be filled later.
            break

    print("[Worker {0}] I'm created!".format(id))

    # NOTE(review): ZMQEventLoop is deprecated in modern pyzmq; kept for
    # compatibility with the dependency this code was written against.
    loop = ZMQEventLoop()
    asyncio.set_event_loop(loop)

    context = Context()
    socket = context.socket(zmq.DEALER)

    socket.connect(slave_addr)

    loop.run_until_complete(_run_worker())
コード例 #29
0
async def main_dagger(context: Context):
    """DAgger training/serving loop.

    Subscribes to frames + telemetry + expert actions, records session data,
    periodically refits the model (DAgger iterations) and publishes the next
    control command according to ``conf.control_mode``
    ('full_expert' | 'full_model' | 'shared').
    """
    import asyncio  # local import: needed for the non-blocking sleep below

    config_manager = ConfigurationManager()
    conf = config_manager.config
    transformer = Transformer(conf)
    recorder = Recorder(conf, transformer)

    data_queue = context.socket(zmq.SUB)
    controls_queue = context.socket(zmq.PUB)

    control_mode = conf.control_mode
    dagger_training_enabled = conf.dagger_training_enabled
    dagger_epoch_size = conf.dagger_epoch_size

    model = None  # assigned in the try block; guarded in finally (fix: was unbound there on early failure)
    try:
        model = ModelWrapper(conf, output_shape=2)
        mem_slice_frames = []
        mem_slice_numerics = []
        data_count = 0
        dagger_iteration = 0

        await initialize_subscriber(data_queue, conf.data_queue_port)
        await initialize_publisher(controls_queue, conf.controls_queue_port)

        while True:
            frame, data = await recv_array_with_json(queue=data_queue)
            # TODO handle case if expert data is not available, i.e full model control
            telemetry, expert_action = data
            if frame is None or telemetry is None or expert_action is None:
                logging.info("None data")
                continue

            #recorder.record_with_expert(frame, telemetry, expert_action)
            mem_frame = transformer.session_frame_wide(frame, mem_slice_frames)
            mem_telemetry = transformer.session_numeric_input(
                telemetry, mem_slice_numerics)
            mem_expert_action = transformer.session_expert_action(
                expert_action)
            if mem_frame is None or mem_telemetry is None:
                # Send back these first few instances, as the other application expects 1:1 responses
                # NOTE(review): send_json on a zmq.asyncio socket returns a
                # future that is not awaited here — confirm intended.
                controls_queue.send_json(expert_action)
                continue

            data_count += recorder.record_session(mem_frame, mem_telemetry,
                                                  mem_expert_action)
            if control_mode == 'shared' and dagger_training_enabled and data_count % dagger_epoch_size == 0:
                recorder.store_session_batch(dagger_epoch_size)

                if dagger_iteration < conf.dagger_epochs_count:
                    # send 0 throttle so the car won't go wild during fitting
                    null_controls = expert_action.copy()
                    null_controls['d_throttle'] = 0.0
                    controls_queue.send_json(null_controls)

                    await fit_and_eval_model(model, conf)
                    dagger_iteration += 1
                    logging.info('Dagger iter {}'.format(dagger_iteration))
                    continue
                else:
                    # Past the configured epoch count: pin the iteration high so
                    # the expert-probability decay below stays near zero.
                    dagger_iteration = 50
            try:
                if control_mode == 'full_expert' or expert_action[
                        'manual_override']:
                    next_controls = expert_action.copy()
                    # Fix: time.sleep() blocked the whole event loop here;
                    # yield to it instead.
                    await asyncio.sleep(0.035)
                elif control_mode == 'full_model':
                    next_controls = model.predict(mem_frame,
                                                  mem_telemetry).to_dict()
                    next_controls['d_gear'] = mem_expert_action[0]
                    #next_controls['d_throttle'] = mem_expert_action[2]
                elif control_mode == 'shared':
                    # Expert takes over with probability exp(-0.02 * iter),
                    # decaying towards full model control.
                    expert_probability = np.exp(-0.02 * dagger_iteration)
                    model_probability = np.random.random()
                    model_action = model.predict(mem_frame,
                                                 mem_telemetry).to_dict()

                    if expert_probability > model_probability:
                        next_controls = model_action
                        next_controls['d_gear'] = mem_expert_action[0]
                        next_controls['d_steering'] = mem_expert_action[1]
                        next_controls['d_throttle'] = mem_expert_action[2]
                    else:
                        next_controls = model_action
                        next_controls['d_gear'] = mem_expert_action[0]
                else:
                    raise ValueError('Misconfigured control mode!')

                recorder.record_full(frame, telemetry, expert_action,
                                     next_controls)
                controls_queue.send_json(next_controls)
            except Exception as ex:
                print("Predicting exception: {}".format(ex))
                traceback.print_tb(ex.__traceback__)
    except Exception as ex:
        print("Exception: {}".format(ex))
        traceback.print_tb(ex.__traceback__)
    finally:
        data_queue.close()
        controls_queue.close()

        files = glob.glob(conf.path_to_session_files + '*')
        for f in files:
            os.remove(f)
        logging.info("Session partials deleted successfully.")

        if recorder is not None:
            #recorder.save_session_with_expert()
            recorder.save_session_with_predictions()

        # Fix: model was referenced unconditionally and could be unbound if
        # ModelWrapper() raised before assignment.
        if model is not None:
            model.save_best_model()
コード例 #30
0
ファイル: main_windows.py プロジェクト: enliktjioe/deltax
async def main_dagger(context: Context):
    """Full-model driving loop (Windows variant).

    Receives frames + expert actions over a SUB socket, runs the model on a
    cropped frame and publishes short-key control commands
    (``p``/``c``/``g``/``s``/``t``/``b``) over a PUB socket.
    """
    import asyncio  # local import: needed for the non-blocking sleep below

    config_manager = ConfigurationManager()
    conf = config_manager.config

    recorder = None

    data_queue = context.socket(zmq.SUB)
    controls_queue = context.socket(zmq.PUB)

    p = 1  # monotonically increasing packet counter, sent as "p"

    control_mode = conf.control_mode  # DELTAX: make sure this is full_model in the config

    model = None  # assigned in the try block; guarded in finally (fix: was unbound there on early failure)
    try:
        model = ModelWrapper(conf, output_shape=2)

        await initialize_subscriber(data_queue, conf.data_queue_port)
        await initialize_publisher(controls_queue, conf.controls_queue_port)

        while True:
            frame, data = await recv_array_with_json(queue=data_queue)
            expert_action = data

            # Fix: the None check must precede the slicing/flipping below —
            # previously a None frame would have raised before being detected.
            if frame is None or expert_action is None:
                logging.info("None data")
                continue

            #/255.0  <- DELTAX make sure you trained the model on same range of data
            #print(np.max(frame))  <- if incoming frames are in range 0-255 and you trained the model on images scaled to 0-1, model wont work
            frame = frame[:, ::
                          -1, :]  #DELTAX: image comes in mirrored in Ubuntu! Need to flip them back for data to be like what we tained with
            frame = np.flip(frame, axis=2)

            if np.random.random() > 0.99:
                print("================= TEST =================")
                skimage.io.imsave("raw_input_in_try_loop.png", frame)

            #DELTAX PREPROCESSING like the one that was done when training the model
            # frame = skimage.transform.resize(frame, (60,180,3))
            mem_frame = frame[-60:, :, :].reshape(
                1, 60, 180, 3
            )  #reshaping as model expects a minibatch dimension as first dim

            #DELTAX - spy function that saves images so you can peek what the model actually sees
            if np.random.random() > 0.99:
                skimage.io.imsave("example_input_in_try_loop.png",
                                  mem_frame[0, :, :, :])

            #It seems the condition below never happens - we should get an json serializability error here, but never do
            if mem_frame is None:
                # Send back these first few instances, as the other application expects 1:1 responses
                print("NONE NONE NONE NONE")
                controls_queue.send_json(
                    {
                        's': 0,
                        'g': 1,
                        't': 0.65,
                        'b': 0
                    }
                )  #TODO need to send repetition of last command or {'d_steer':0,....}
                continue

            try:
                if control_mode == 'full_expert':  #this would be if you steer by controller
                    next_controls = expert_action.copy()
                    print("Full expert ", next_controls)
                    # Fix: time.sleep() blocked the whole event loop here;
                    # yield to it instead.
                    await asyncio.sleep(0.035)
                elif control_mode == 'full_model':  #DELTAX: this is where we work in!
                    controls = model.model.predict(
                        mem_frame
                    )[0]  #DELTAX - neural network makes predictions based on frame
                    #print(controls) #DELTAX: this printout helps you understand if model outputs are in reasonable range (-1 to 1 fr steering)

                    next_controls = {"p": p}
                    p = p + 1

                    # c is a timestamp
                    next_controls["c"] = int(time.time())

                    # DELTAX: always go forward
                    next_controls["g"] = 1

                    #DELTAX: floats we sent must be float64, as flat32 is not json serializable for some reason
                    next_controls["s"] = max(-1, min(1,
                                                     np.float64(controls[0])))
                    #for throttle you can use 0 to just test if car turns wheels in good direction at different locations

                    #the minimal throttle to make the car move slowly is around 0.65, depends on battery charge level
                    next_controls["t"] = np.float64(
                        0.6)  # max(0,min(1, np.float64(controls[1])))}

                    #DELTAX: to use model's output for throttle, not fixed value
                    #next_controls['t'] = max(0,min(1, np.float64(controls[1])))}

                    #not dure if needed:
                    next_controls["b"] = 0

                    print(next_controls)

                else:
                    raise ValueError('Misconfigured control mode!')

                controls_queue.send_json(next_controls)
                #input('Let us wait for user input. Let me know how many seconds to sleep now.\n')

            except Exception as ex:
                print("Predicting exception: {}".format(ex))
                traceback.print_tb(ex.__traceback__)
    except Exception as ex:
        print("Exception: {}".format(ex))
        traceback.print_tb(ex.__traceback__)
    finally:
        data_queue.close()
        controls_queue.close()

        files = glob.glob(conf.path_to_session_files + '*')
        for f in files:
            os.remove(f)
        logging.info("Session partials deleted successfully.")

        if recorder is not None:
            #recorder.save_session_with_expert()
            recorder.save_session_with_predictions()

        # Fix: model could be unbound here if ModelWrapper() raised.
        if model is not None:
            model.save_best_model()
コード例 #31
0
class QWeatherClient:
    """Client class for the QWeather messaging framework"""
    class serverclass:
        """Support class to represent the available servers as objects, with their exposed functions as callable attributes. The __repr__ makes it look like they are server objects"""
        def __init__(self,name,addr,methods,client):
            self.name = name
            self.addr = addr
            self.client = client
            for amethod in methods:
                setattr(self,amethod[0],self.bindingfunc(amethod[0],amethod[1]))


        def bindingfunc(self,methodname,methoddoc):
            """Ensures that "calling" the attribute of the "server"object with the name of a server function, sends a request to the server to execute that function and return the response"""
            def func(*args,**kwargs):
                timeout = kwargs.pop('timeout',CSYNCTIMEOUT) # This pops the value for timeout if it exists in kwargs, or returns the default timeout value. So this saves a line of code on logic check
                return self.client.send_request([self.name.encode(),methodname.encode(),pickle.dumps([args,kwargs])],timeout=timeout)
            func.__name__ = methodname
            func.__doc__ = methoddoc
            func.__repr__ = lambda: methoddoc
            func.is_remote_server_method = True
            return func


        def __repr__(self):
            msg = ""
            lst = [getattr(self,method) for method in dir(self) if getattr(getattr(self,method),'is_remote_server_method',False)]
            if len(lst) == 0:
                return 'No servers connected'
            else:
                for amethod in lst:
                    msg += amethod.__name__ +"\n"
            return msg.strip()


    # Class-level defaults; replaced with real objects in reconnect().
    context = None
    socket = None
    poller = None
    # Maps messageid+servername (bytes) -> asyncio.Future for pending async requests.
    futureobjectdict = {}

    def __init__(self,QWeatherStationIP,name = None,loop = None,debug=False,verbose=False):
        """Parse the broker address, connect, and fetch the server list."""
        IpAndPort = re.search(IPREPATTERN,QWeatherStationIP)
        assert IpAndPort != None, 'Ip not understood (tcp://xxx.xxx.xxx.xxx:XXXX or txp://localhost:XXXX)'
        self.QWeatherStationIP = IpAndPort.group(1)
        self.QWeatherStationSocket = IpAndPort.group(2)
        assert self.QWeatherStationIP[:6] == 'tcp://', 'Ip not understood (tcp://xxx.xxx.xxx.xxx:XXXX or txp://localhost:XXXX)'
        assert len(self.QWeatherStationSocket) == 4, 'Port not understood (tcp://xxx.xxx.xxx.xxx:XXXX or txp://localhost:XXXX)'
        if loop is None:
            self.loop = asyncio.get_event_loop()
        else:
            self.loop = loop

        if name is None:
            import socket
            name = socket.gethostname()

        formatting = '{:}: %(levelname)s: %(message)s'.format(name)
        if debug:
            logging.basicConfig(format=formatting,level=logging.DEBUG)
        if verbose:
            logging.basicConfig(format=formatting,level=logging.INFO)
        self.name = name.encode()
        self.reconnect()
#        self.ping_broker()
        self.loop.run_until_complete(self.get_server_info())
        self.running = False
        self.messageid = 0
        atexit.register(self.close)


    def reconnect(self):
        '''connects or reconnects to the broker'''
        if self.poller:
            self.poller.unregister(self.socket)
        if self.socket:
            self.socket.close()
        self.context = Context()
        self.socket = self.context.socket(zmq.DEALER)
        self.socket.connect(self.QWeatherStationIP + ':' + self.QWeatherStationSocket)
        self.subsocket = self.context.socket(zmq.SUB)
        self.subsocket.connect(self.QWeatherStationIP + ':' + str(int(self.QWeatherStationSocket) + SUBSOCKET))

        self.poller = Poller()
        self.poller.register(self.socket,zmq.POLLIN)
        self.poller.register(self.subsocket,zmq.POLLIN)

    def subscribe(self,servername,function):
        """Subscribe to a server with a callback function"""
        self.subsocket.setsockopt(zmq.SUBSCRIBE,servername.encode())
        self.subscribers[servername] = function

    def unsubscribe(self,servername):
        """Unsubscribe from a server"""
        self.subsocket.setsockopt(zmq.UNSUBSCRIBE,servername.encode())
        self.subscribers.pop(servername)


    async def get_server_info(self):
        """Get information about servers from the broker"""
        msg = [b'',b'C',CREADY,PCLIENT,self.name]
        self.send_message(msg)
        msg = await self.recieve_message()
        empty = msg.pop(0)
        assert empty == b''
        command = msg.pop(0)
        self.serverlist = []
        self.subscribers = {}
        if command == CREADY + CFAIL:
            raise Exception(msg.pop(0).decode())
        else:
            serverdict = pickle.loads(msg.pop(0))
            servermethoddict = pickle.loads(msg.pop(0))
            for addr,name in serverdict.items():
                methods = servermethoddict[addr]
                server = self.serverclass(name,addr,methods,self)
                server.is_remote_server = True
                setattr(self,name,server)
                self.serverlist.append(server)



    def send_request(self,body,timeout):
        """Send a request. If the client is running (i.e. in async mode) send an async request, else send a synchronous request\n
        Attach a messageID to each request. (0-255)"""
        # messageid wraps at 255 because it is sent as a single byte.
        self.messageid+=1
        if self.messageid > 255:
            self.messageid = 0
        if self.running:
            result =  asyncio.get_event_loop().create_task(self.async_send_request(body,self.messageid.to_bytes(1,'big')))
        else:
            result = self.sync_send_request(body,self.messageid.to_bytes(1,'big'),timeout)
        return result

    def ping_broker(self):
        """Ping the broker"""
        self.send_message([b'',b'P'])
        try:
            if len(self.loop.run_until_complete(self.poller.poll(timeout=2000))) == 0: #wait 2 seconds for a ping from the broker
                raise Exception('QWeatherStation not found')
            else:
                msg =  self.loop.run_until_complete(self.recieve_message())
                empty = msg.pop(0)
                pong = msg.pop(0)

                logging.debug('Recieved Pong: {:}'.format(pong))
                if pong != b'b':
                    raise Exception('QWeatherStation sent wrong Pong')

        except Exception as e:
            self.poller.unregister(self.socket)
            self.socket.close()
            raise e


    def sync_send_request(self,body,ident,timeout):
        """Synchronously send request. Timeout with the default timeoutvalue [FINDOUTHOWTOLINKTOTHECONSTANTSPAGETOSHOWDEFAULTVALUE]"""
        msg = [b'',b'C',CREQUEST,ident]  + body
        server = body[0]
        self.send_message(msg)
        if len(self.loop.run_until_complete(self.poller.poll(timeout=timeout))) == 0:
            # NOTE(review): the Exception is *returned*, not raised, so callers
            # must check the result type. Kept for backward compatibility.
            return Exception('Synchronous request timed out. Try adding following keyword to function call: "timeout=XX" in ms')
        else:
            msg = self.loop.run_until_complete(self.recieve_message())
            empty = msg.pop(0)
            assert empty == b''
            command = msg.pop(0)
            ident = msg.pop(0)
            server = msg.pop(0)
            answ = pickle.loads(msg[0])
            return answ

    async def async_send_request(self,body,ident):
        """Ansynchronously send request. No explicit timeout on the client side for this. Relies on the "servertimeout" on the broker side"""
        server = body[0]
        msg = [b'',b'C',CREQUEST,ident]  + body


        self.send_message(msg)
        answ = await self.recieve_future_message(ident+server) #Waits here until the future is set to completed
        self.futureobjectdict.pop(ident+server)
        return answ

    def send_message(self,msg):
        """Send a multi-frame-message over the ZMQ socket"""
        self.socket.send_multipart(msg)


    def recieve_future_message(self,id):
        """Create a future for the async request, add it to the dict of futures (id = messageid+server)"""
        tmp = self.loop.create_future()
        self.futureobjectdict[id] = tmp
        return tmp

    async def recieve_message(self):
        """Recieve a multi-frame-message over the zmq socket"""
        msg = await self.socket.recv_multipart()
        return msg

    def handle_message(self,msg):
        """First step of handling an incoming message\n
        First asserts that the first frame is empty\n
        Then sorts the message into either request+success, request+fail or ping"""
        empty = msg.pop(0)
        assert empty == b''
        command = msg.pop(0)

        if command == CREQUEST + CSUCCESS:
            messageid = msg.pop(0)
            servername = msg.pop(0)
            msg = pickle.loads(msg[0])
            self.handle_request_success(messageid,servername,msg)

        elif command == CREQUEST + CFAIL:
            messageid = msg.pop(0)
            servername = msg.pop(0)
            self.handle_request_fail(messageid,servername)

        elif command == CPING:
            ping = msg.pop(0)
            if ping != b'P':
                raise Exception('QWeatherStation sent wrong ping')
            logging.debug('Recieved Ping from QWeatherStation')
            self.send_message([b'',b'b'])

    def handle_request_success(self,messageid,servername,msg):
        """Handle successful request by setting the result of the future (manually finishing the future)"""
        self.futureobjectdict[messageid + servername].set_result(msg)

    def handle_request_fail(self,messageid,servername):
        """Handle a failed request by setting the future to an exception.

        Fixed: the original referenced undefined names ``server`` and ``msg``
        (NameError at runtime); the future is keyed on messageid+servername
        and the broker does not forward a failure body to this handler.
        """
        self.futureobjectdict[messageid + servername].set_exception(Exception('Request failed'))

    def handle_broadcast(self,msg):
        """Handle a message on the broadcast socket by calling the callback function connected to the relevant server"""
        server= msg.pop(0).decode()
        msg = pickle.loads(msg.pop(0))
        self.subscribers[server](msg)

    async def run(self):
        """Asynchronously run the client by repeatedly polling the recieving socket"""
        self.running = True
        while True:
            try:
                socks = await self.poller.poll(1000)
                socks = dict(socks)
                if self.socket in socks:
                    msg = await self.recieve_message()
                    self.handle_message(msg)

                elif self.subsocket in socks:
                    msg = await self.recieve_message()
                    self.handle_broadcast(msg)

            except KeyboardInterrupt:
                self.close()
                break



    def close(self):
        """Closing function. Tells the broker that it disconnects. Is not called if the terminal is closed or the process is force-killed"""
        self.send_message([b'',b'C',CDISCONNECT])
        self.poller.unregister(self.socket)
        self.socket.close()



    def __repr__(self):
        msg = ""
        if len(self.serverlist) == 0:
            return 'No servers connected'
        else:
            for aserver in self.serverlist:
                msg += aserver.name + "\n"
        return msg.strip()

    def __iter__(self):
        return (aserv for aserv in self.serverlist)

    def __getitem__(self,key):
        return self.serverlist[key]
コード例 #32
0
async def main_dagger(context: Context):
    """Full-model driving loop with heuristic steering/throttle shaping.

    Receives frames + expert actions over a SUB socket, runs the model on a
    cropped frame, applies a steering-bias correction and a straight-road
    speed boost, and publishes ``d_steering``/``d_gear``/``d_throttle``
    commands over a PUB socket.
    """
    import asyncio  # local import: needed for the non-blocking sleep below

    config_manager = ConfigurationManager()
    conf = config_manager.config

    recorder = None

    data_queue = context.socket(zmq.SUB)
    controls_queue = context.socket(zmq.PUB)

    control_mode = conf.control_mode  # DELTAX: make sure this is full_model in the config

    init_jalan = True   # first-command flag ("jalan" = drive)
    counter_speed = 0   # consecutive near-straight frames; triggers a speed boost

    model = None  # assigned in the try block; guarded in finally (fix: was unbound there on early failure)
    try:
        model = ModelWrapper(conf, output_shape=2)

        await initialize_subscriber(data_queue, conf.data_queue_port)
        await initialize_publisher(controls_queue, conf.controls_queue_port)

        while True:
            frame, data = await recv_array_with_json(queue=data_queue)
            expert_action = data

            # Fix: the None check must precede the slicing/flipping below —
            # previously a None frame would have raised before being detected.
            if frame is None or expert_action is None:
                logging.info("None data")
                continue

            #/255.0  <- DELTAX make sure you trained the model on same range of data
            #print(np.max(frame))  <- if incoming frames are in range 0-255 and you trained the model on images scaled to 0-1, model wont work
            frame = frame[:, ::
                          -1, :]  #DELTAX: image comes in mirrored in Ubuntu! Need to flip them back for data to be like what we tained with
            frame = np.flip(frame, axis=2)

            if np.random.random() > 0.99:
                skimage.io.imsave("raw_input_in_try_loop.png", frame)

            #DELTAX PREPROCESSING like the one that was done when training the model
            mem_frame = frame[-60:, :, :].reshape(
                1, 60, 180, 3
            )  #reshaping as model expects a minibatch dimension as first dim

            #DELTAX - spy function that saves images so you can peek what the model actually sees
            if np.random.random() > 0.99:
                skimage.io.imsave("example_input_in_try_loop.png",
                                  mem_frame[0, :, :, :])

            if mem_frame is None:
                # Send back these first few instances, as the other application expects 1:1 responses
                print("NONE NONE NONE NONE")
                controls_queue.send_json(
                    {
                        'd_steering': 0,
                        'd_gear': 1,
                        'd_throttle': 0.65
                    }
                )  #TODO need to send repetition of last command or {'d_steer':0,....}
                continue

            try:
                if control_mode == 'full_expert':  #this would be if you steer by controller
                    next_controls = expert_action.copy()
                    # Fix: time.sleep() blocked the whole event loop here;
                    # yield to it instead.
                    await asyncio.sleep(0.035)
                elif control_mode == 'full_model':  #DELTAX: this is where we work in!
                    controls = model.model.predict(
                        mem_frame
                    )[0]  #DELTAX - neural network makes predictions based on frame
                    #DELTAX: this printout helps you understand if model outputs are in reasonable range (-1 to 1 fr steering)

                    #because of the wrong calibration from the controller, it made the car tends to go right in the "jalan lurus" (straight road), so we need to decrease it
                    s = np.float64(controls[0])

                    if (s > -0.15) and (s < 0.2):
                        s = s - 0.15

                    s = max(-1, min(1, s))

                    #DELTAX: floats we sent must be float64, as flat32 is not json serializable for some reason
                    next_controls = {'d_steering': s}

                    next_controls['d_gear'] = 1  # DELTAX: always go forward
                    #for throttle you can use 0 to just test if car turns wheels in good direction at different locations
                    #the minimal throttle to make the car move slowly is around 0.65, depends on battery charge level

                    t = 0.47

                    if (s >= -0.25) and (s <= 0.25):
                        counter_speed = counter_speed + 1
                    else:
                        counter_speed = 0

                    # After 10 consecutive near-straight frames, boost throttle once.
                    if counter_speed >= 10:
                        t = 0.74
                        counter_speed = 0

                    # NOTE(review): both branches assign the same value, so
                    # init_jalan has no observable effect; kept as-is.
                    if init_jalan:
                        next_controls['d_throttle'] = np.float64(
                            t)  # max(0,min(1, np.float64(controls[1])))}
                        init_jalan = False
                    else:
                        next_controls['d_throttle'] = np.float64(
                            t)  # max(0,min(1, np.float64(controls[1])))}

                    print(next_controls, counter_speed)

                    #DELTAX: to use model's output for throttle, not fixed value
                    #next_controls['d_throttle'] = max(0,min(1, np.float64(controls[1])))}

                else:
                    raise ValueError('Misconfigured control mode!')

                controls_queue.send_json(next_controls)

            except Exception as ex:
                print("Predicting exception: {}".format(ex))
                traceback.print_tb(ex.__traceback__)
    except Exception as ex:
        print("Exception: {}".format(ex))
        traceback.print_tb(ex.__traceback__)
    finally:
        data_queue.close()
        controls_queue.close()

        files = glob.glob(conf.path_to_session_files + '*')
        for f in files:
            os.remove(f)
        logging.info("Session partials deleted successfully.")

        if recorder is not None:
            #recorder.save_session_with_expert()
            recorder.save_session_with_predictions()

        # Fix: model could be unbound here if ModelWrapper() raised.
        if model is not None:
            model.save_best_model()
コード例 #33
0
class FireSystem:
    """ZeroMQ REP endpoint that arms/fires pyrotechnic "tubes" on commands
    from a central "Battlefield" controller.

    Requests are single-key JSON dicts mapping a command name
    ("ping" | "fire" | "auth") to a dict of keyword arguments; dispatch
    goes through ``self.router``.
    """

    # Class-level default kept for backward compatibility with any code
    # that inspects FireSystem.router; instances get their own dict below.
    router = {}

    def __init__(self):
        self.active = True
        self.ping = 0                 # monotonically increasing pong counter
        self.ping_rate = 4000         # poll timeout, in milliseconds
        # BUG FIX: the original inserted bound methods into the *class*
        # dict, so every instance shared one router pointing at the last
        # instance created.  Use a per-instance mapping instead.
        self.router = {
            "ping": self.c_ping,
            "fire": self.c_fire,
            "auth": self.c_auth,
        }
        self.last_message = 0         # epoch seconds of last request seen
        self.address = "tcp://0.0.0.0:5555"
        self.tubes = []
        self.load_config()
        self.load_tubes()

    def load_config(self, path="~/.madcat/config.json"):
        """Load JSON configuration from *path* into ``self.config``.

        Raises:
            EnvironmentError: if the config file does not exist.
        """
        path = os.path.expanduser(path)
        if not os.path.isfile(path):
            raise EnvironmentError(-1,
                                   "MadCat config missing at {}".format(path))
        with open(path) as fh:
            self.config = json.load(fh)

    def run(self):
        """Run the connect/listen/reconnect lifecycle until interrupted."""
        try:
            loop = asyncio.get_event_loop()
            loop.create_task(self._lifecycle())
            loop.run_forever()
        except KeyboardInterrupt:
            self.shutdown()

    async def _lifecycle(self):
        # Reconnect loop: (re)bind the socket, serve until the connection
        # is considered dead, then tear down and retry.
        while self.active:
            self.establish_socket()
            await self.listen()
            self.disconnect()
        self.shutdown()

    def shutdown(self):
        """Stop the event loop and terminate the process."""
        self.active = False
        print("Shutting down")
        loop = asyncio.get_event_loop()
        # BUG FIX: the original called close() before stop(); closing a
        # still-running loop raises RuntimeError.  Stop first, then close.
        # NOTE(review): close() can still raise when shutdown() is reached
        # from inside the running loop (_lifecycle path) — confirm intent.
        loop.stop()
        loop.close()
        exit()

    def load_tubes(self):
        """Instantiate a Fuse for every configured tube_id -> pin_id pair."""
        tubes = self.config.get("tubes")
        if not tubes:
            raise ValueError("INIT ERROR: No tube config!")
        for tube_id, pin_id in tubes.items():
            self.tubes.append(Fuse(tube_id, pin_id))

    def establish_socket(self):
        """Bind a REP socket and block until Battlefield registration works."""
        self.context = Context()
        self.socket = self.context.socket(REP)
        self.socket.bind(self.address)

        self.poller = Poller()
        self.poller.register(self.socket, POLLIN)

        self.ping = 0
        # NOTE(review): time.sleep blocks the event loop when reached from
        # _lifecycle; asyncio.sleep may be intended — confirm before changing.
        while self.auth() is False:
            time.sleep(1)

    def disconnect(self):
        """Detach the REP socket from its bound address."""
        self.socket.disconnect(self.address)

    """
    Authentication
    """

    def auth(self):
        """Register with the Battlefield; True on success, False otherwise."""
        http = urllib3.PoolManager()
        try:
            # Response body/status is intentionally ignored; reachability
            # alone counts as successful registration.
            http.request(
                "GET", "http://{address}:{port}/register".format(
                    **self.config.get("battlefield")))
        except urllib3.exceptions.MaxRetryError:
            print("Cannot connect to Battlefield")
            return False
        print("Registered to Battlefield")
        return True

    async def listen(self):
        """Serve requests until the connection goes quiet for too long."""
        listening = True
        while listening:
            events = await self.poller.poll(self.ping_rate)
            if events:
                for _sock, _flags in events:
                    msg = await self.socket.recv_json()
                    self.last_message = time.time()
                    if not isinstance(msg, dict):
                        # BUG FIX: the original fell through to msg.items()
                        # and crashed on non-dict payloads; skip them.
                        self.respond_error("Message not a dict!")
                        continue
                    if len(msg) != 1:
                        # BUG FIX: the original still dispatched after this
                        # error, sending two replies on a REP socket.
                        self.respond_error(
                            "Only one request at a time! (for now)")
                        continue

                    for command, request in msg.items():
                        if not isinstance(request, dict):
                            self.respond_error("Request not a dict!")
                        else:
                            self.router.get(command,
                                            self.invalid_command)(**request)
            else:
                print("Missed ping!")
                # Three silent poll windows in a row => assume a dead link.
                if (time.time() -
                        self.last_message) > (self.ping_rate / 1000) * 3:
                    print("Connection failed!  Attempting reconnect")
                    listening = False

    def respond_error(self, msg):
        # NOTE(review): on a zmq.asyncio socket send_json returns a future
        # that is never awaited here — confirm delivery semantics.
        self.socket.send_json({"error": msg})

    def invalid_command(self, **kwargs):
        """Fallback handler for unknown commands."""
        self.respond_error("not a valid command")

    @property
    def tube_ids(self):
        """IDs of all configured tubes."""
        return [x.id for x in self.tubes]

    def get_tube(self, tube):
        """Return the Fuse whose id matches *tube*, or None if unknown."""
        for t in self.tubes:
            if t.id == tube:
                return t
        return None

    def c_fire(self, **kwargs):
        """Fire the requested tube, replying with success or an error."""
        tube = self.get_tube(kwargs.get("tube"))
        if tube is None:
            self.respond_error("I dont own tube {}".format(kwargs.get("tube")))
        elif tube.fired:
            self.respond_error("Tube {} is not armed!".format(tube.id))
        elif tube.fire():
            self.socket.send_json({"fired": tube.id})
        else:
            self.respond_error("Tube {} failed to fire!".format(tube.id))

    def c_ping(self, **kwargs):
        """Reply to a keep-alive ping with the current counter value."""
        self.socket.send_json({"pong": self.ping})
        self.ping += 1

    def c_auth(self, challenge):
        """Answer an auth challenge with MD5(PSK.format(challenge)) + tubes."""
        response = self.config["PSK"].format(challenge)
        self.socket.send_json({
            "auth": {
                "response": hashlib.md5(response.encode()).hexdigest(),
                "tubes": self.tube_ids
            }
        })
コード例 #34
0
ファイル: lbbroker2.py プロジェクト: lw000/zmq_guide
async def run_broker(loop):
    """Least-recently-used load-balancing broker.

    Spawns NBR_WORKERS worker tasks and NBR_CLIENTS client tasks over
    inproc transports, then routes client requests to ready workers and
    relays replies back until NBR_CLIENTS * 3 replies were delivered.

    BUG FIX: the original was a plain generator using ``yield from``
    without @asyncio.coroutine; that legacy coroutine style was removed
    in Python 3.11 and a bare generator is not awaitable.  Converted to
    a native ``async def`` coroutine, matching the rest of the file.

    Args:
        loop: the running event loop; stopped just before returning.

    Returns:
        The string 'finished ok'.
    """
    print('(run_broker) starting')
    url_worker = "inproc://workers"
    url_client = "inproc://clients"
    client_nbr = NBR_CLIENTS * 3  # replies left to deliver before exit
    # Prepare our context and sockets.
    context = Context()
    frontend = context.socket(zmq.ROUTER)
    frontend.bind(url_client)
    backend = context.socket(zmq.ROUTER)
    backend.bind(url_worker)
    print('(run_broker) creating workers and clients')
    # Spawn worker and client coroutines as background tasks.
    worker_tasks = []
    for idx in range(NBR_WORKERS):
        worker_tasks.append(
            asyncio.ensure_future(run_worker(url_worker, context, idx)))
    client_tasks = []
    for idx in range(NBR_CLIENTS):
        client_tasks.append(
            asyncio.ensure_future(run_client(url_client, context, idx)))
    print('(run_broker) after creating workers and clients')
    # Logic of LRU loop:
    # - Poll backend always, frontend only if 1+ worker ready.
    # - If worker replies, queue worker as ready and forward reply
    #   to client if necessary.
    # - If client requests, pop next worker and send request to it.
    available_workers = 0
    workers_list = []  # stack of ready worker addresses (LRU order)
    poller = Poller()
    # Always poll for worker activity on backend.
    poller.register(backend, zmq.POLLIN)
    # Poll front-end only if we have available workers.
    poller.register(frontend, zmq.POLLIN)
    while True:
        socks = dict(await poller.poll())
        # Handle worker activity on backend.
        if backend in socks and socks[backend] == zmq.POLLIN:
            # Queue worker address for LRU routing.
            message = await backend.recv_multipart()
            assert available_workers < NBR_WORKERS
            worker_addr = message[0]
            # Add worker back to the list of ready workers.
            available_workers += 1
            workers_list.append(worker_addr)
            # Second frame is an empty delimiter.
            empty = message[1]
            assert empty == b""
            # Third frame is READY or else a client reply address.
            client_addr = message[2]
            if client_addr != b'READY':
                # Client reply: following frame is empty, then the payload.
                empty = message[3]
                assert empty == b""
                reply = message[4]
                await frontend.send_multipart([client_addr, b"", reply])
                printdbg(
                    '(run_broker) to frontend -- reply: "{}"'.format(reply))
                client_nbr -= 1
                if client_nbr == 0:
                    printdbg('(run_broker) exiting')
                    break  # Exit after N messages.
        # Poll on frontend only if workers are available.
        if available_workers > 0:
            if frontend in socks and socks[frontend] == zmq.POLLIN:
                # Client request is [address][empty][request];
                # route it to the LRU worker.
                response = await frontend.recv_multipart()
                [client_addr, empty, request] = response
                assert empty == b""
                # Dequeue the next ready worker address.
                available_workers -= 1
                worker_id = workers_list.pop()
                await backend.send_multipart(
                    [worker_id, b"", client_addr, b"", request])
                printdbg(
                    '(run_broker) to backend -- request: "{}"'.format(request))
    # Out of infinite loop: do some housekeeping.
    printdbg('(run_broker) finished')
    for worker_task in worker_tasks:
        worker_task.cancel()
    printdbg('(run_broker) workers cancelled')
    await asyncio.sleep(1)
    frontend.close()
    backend.close()
    # context.term()     # Caution: calling term() blocks.
    loop.stop()
    printdbg('(run_broker) returning')
    return 'finished ok'