Ejemplo n.º 1
0
def run_worker():
    """Worker coroutine for the LRU-queue (Paranoid Pirate) pattern.

    Connects a REQ socket to the broker, announces readiness with
    LRU_READY, then echoes back each request.  After a warm-up of a few
    cycles it randomly simulates a crash (break) or CPU overload (long
    sleep) so the broker's failure handling can be exercised.
    """
    context = Context(1)
    worker = context.socket(zmq.REQ)

    # Random printable identity so broker logs can distinguish workers.
    identity = "%04X-%04X" % (randint(0, 0x10000), randint(0, 0x10000))
    worker.setsockopt_string(zmq.IDENTITY, identity)
    worker.connect("tcp://localhost:5556")

    print("I: (%s) worker ready" % identity)
    yield from worker.send_string(LRU_READY)

    cycles = 0
    while True:
        msg = yield from worker.recv_multipart()
        if not msg:
            break

        cycles += 1
        # Only start simulating failures after a few successful cycles.
        if cycles > 3 and randint(0, 5) == 0:
            print("I: (%s) simulating a crash" % identity)
            break
        elif cycles > 3 and randint(0, 5) == 0:
            print("I: (%s) simulating CPU overload" % identity)
            yield from asyncio.sleep(3)
        print("I: (%s) normal reply" % identity)
        # Do some heavy work
        yield from asyncio.sleep(1)
        yield from worker.send_multipart(msg)
Ejemplo n.º 2
0
 def __init__(self):
     """Create the publisher socket and the (empty) handler registry."""
     self.logger = getLogger(__name__)
     self.context = Context()
     # PUB socket used to broadcast events to subscribers.
     self.pub_sock = self.context.socket(PUB)
     self.pub_sock.bind("tcp://127.0.0.1:40086")
     self.running = True  # loop flag checked by the receive loop
     self.handlers = {}   # action name -> handler callable
Ejemplo n.º 3
0
 def __init__(self):
     """Create a connected local PUB/SUB socket pair over TCP."""
     context = Context()
     self.pub = context.socket(zmq.PUB)
     self.sub = context.socket(zmq.SUB)
     self.pub.bind('tcp://127.0.0.1:2000')
     self.sub.connect('tcp://127.0.0.1:2000')
     time.sleep(0.5) # make sure the connection is fully established
Ejemplo n.º 4
0
 def __init__(self, name):
     """Create REQ/SUB sockets for client *name* and connect the REQ side."""
     self.name = name
     self.logger = getLogger(__name__)
     self.context = Context()
     # REQ socket for request/reply calls to the server.
     self.req_sock = self.context.socket(REQ)
     # SUB socket for receiving published events.
     self.sub_sock = self.context.socket(SUB)
     self.connect_req_sock()
     self.uuid = str(uuid4())  # unique id for this client instance
Ejemplo n.º 5
0
 def get_socket(self):
     """Create a REQ socket connected to the server and register it for polling.

     Uses the asyncio-aware Context when running asynchronously,
     otherwise the plain blocking zmq Context.
     """
     ctx = Context() if self.is_async else zmq.Context()
     sock = ctx.socket(zmq.REQ)
     sock.connect(self.server_address)
     self.poll.register(sock, zmq.POLLIN)
     return sock
Ejemplo n.º 6
0
async def main():
    """Bind a PUB socket on 127.0.0.1:12345 and run sender() until done."""
    ctx = Context()
    sock = ctx.socket(zmq.PUB)
    sock.bind('tcp://127.0.0.1:12345')
    try:
        await sender(sock)
    finally:
        # Linger of 1 ms so close() does not block on undelivered messages.
        sock.close(1)
        ctx.destroy()
Ejemplo n.º 7
0
class Server:
    """ZMQ REP/PUB server: answers JSON requests and publishes events.

    Requests arrive as JSON objects with an ``action`` key on a REP
    socket; the registered handler coroutine for that action produces
    the JSON reply.  Events are broadcast to subscribers via a PUB
    socket.
    """

    # Monotonically increasing request id shared by all instances,
    # used only to correlate log lines for a single request.
    transaction = 0

    def __init__(self):
        self.logger = getLogger(__name__)
        self.context = Context()
        self.pub_sock = self.context.socket(PUB)
        self.pub_sock.bind("tcp://127.0.0.1:40086")
        self.running = True   # cleared by stop() to end the receive loop
        self.handlers = {}    # action name -> async handler coroutine

    async def recv_rep_and_process(self):
        """Receive JSON requests on the REP socket and send back replies.

        Polls with a short timeout so stop() can terminate the loop
        promptly; the socket is closed when the loop exits.
        """
        rep_sock = self.context.socket(REP)
        rep_sock.bind("tcp://127.0.0.1:40085")
        while self.running:
            if await rep_sock.poll(timeout=10) and self.running:
                msg = await rep_sock.recv_json()
                resp = await self.handle_req(msg)
                self.logger.debug('resp: %s', resp)
                await rep_sock.send_json(resp)
        rep_sock.close()

    def stop(self):
        """Stop the receive loop and close the publisher socket."""
        self.logger.info('stopping zmq server ...')
        self.running = False
        self.pub_sock.close()

    async def handle_req(self, msg):
        """Dispatch *msg* to the handler registered for its ``action``.

        Returns the handler's reply dict, or ``dict(error=1, ...)`` when
        the action is unknown or the handler raises.
        """
        # Robustness fix: pop() instead of get()+del, so a request
        # without an 'action' key no longer raises KeyError.
        action = msg.pop('action', '')
        self.__class__.transaction += 1
        seq = self.__class__.transaction
        if action in self.handlers:
            self.logger.info('handle(seq: %d) %s, with %s', seq, action, msg)
            try:
                resp = await self.handlers[action](**msg)
                self.logger.info('handle(seq: %d) %s, return %s', seq, action,
                                 resp)
                return resp
            except RuntimeError as err:
                self.logger.error('error while handle(seq: %d) %s:\n%s', seq,
                                  action, traceback.format_exc())
                return dict(error=1, message=str(err))
            except Exception as ex:
                self.logger.error(type(ex))
                # Bug fix: previously fell through and returned None,
                # which recv_rep_and_process serialized as JSON null.
                return dict(error=1, message=str(ex))
        else:
            self.logger.error('register with action: %s not exist', action)
            # Bug fix: typo "Invalud" -> "Invalid" in the error message.
            return dict(error=1, message='Invalid action: {}'.format(action))

    def register_callback(self, action, func):
        """Register coroutine *func* as the handler for *action*."""
        self.handlers[action] = func

    async def notify_service_event(self, service, data):
        """Publish *data* as JSON on the *service* topic."""
        await self.pub_sock.send_string(service, flags=SNDMORE)
        self.logger.debug('publish: %s', data)
        await self.pub_sock.send_json(data)
Ejemplo n.º 8
0
async def main():
    """Connect a SUB socket to 127.0.0.1:12345 and run receiver() until done."""
    ctx = Context()
    sock = ctx.socket(zmq.SUB)
    sock.connect('tcp://127.0.0.1:12345')
    # Empty prefix = subscribe to every message.
    sock.subscribe(b'')
    try:
        await receiver(sock)
    finally:
        # Linger of 1 ms so close() does not block on pending messages.
        sock.close(1)
        ctx.destroy()
Ejemplo n.º 9
0
def main(context: Context):
    """Entry point: wire up RCSnail, the ZMQ queues, pygame and the render loop.

    Signs in to the RCSnail service, creates the PUB (telemetry out) and
    SUB (controls in) sockets, initializes pygame, and runs the event
    loop until interrupted, cancelling all tasks on exit.
    """
    config_manager = ConfigurationManager()
    config = config_manager.config
    rcs = RCSnail()
    # Credentials come from the environment; empty strings if unset.
    rcs.sign_in_with_email_and_password(os.getenv('RCS_USERNAME', ''),
                                        os.getenv('RCS_PASSWORD', ''))

    loop = asyncio.get_event_loop()

    # Outgoing data (telemetry/frames) is published on this socket.
    data_queue = context.socket(zmq.PUB)
    loop.run_until_complete(
        initialize_publisher(data_queue, config.data_queue_port))

    # Incoming control commands are received on this socket.
    controls_queue = context.socket(zmq.SUB)
    loop.run_until_complete(
        initialize_subscriber(controls_queue, config.controls_queue_port))

    pygame_event_queue = asyncio.Queue()
    pygame.init()
    pygame.display.set_caption("RCSnail Connector")

    screen = pygame.display.set_mode(
        (config.window_width, config.window_height))
    interceptor = Interceptor(config, data_queue, controls_queue)
    car = JoystickCar(config,
                      send_car_state=interceptor.send_car_state,
                      recv_car_controls=interceptor.recv_car_controls)
    renderer = JoystickRenderer(config, screen, car)
    renderer.init_controllers()
    interceptor.set_renderer(renderer)

    # Pygame's blocking event loop runs in an executor thread; the rest
    # are asyncio tasks on this loop.
    pygame_task = loop.run_in_executor(None, renderer.pygame_event_loop, loop,
                                       pygame_event_queue)
    render_task = asyncio.ensure_future(renderer.render(rcs))
    event_task = asyncio.ensure_future(
        renderer.register_pygame_events(pygame_event_queue))
    queue_task = asyncio.ensure_future(
        rcs.enqueue(loop,
                    interceptor.new_frame,
                    interceptor.new_telemetry,
                    track=config.track,
                    car=config.car))

    try:
        loop.run_forever()
    except KeyboardInterrupt:
        print("Closing due to keyboard interrupt.")
    finally:
        # Tear everything down in reverse order of creation.
        queue_task.cancel()
        pygame_task.cancel()
        render_task.cancel()
        event_task.cancel()
        pygame.quit()
        asyncio.ensure_future(rcs.close_client_session())
Ejemplo n.º 10
0
    def connect(self):
        """Build the ZMQ context and wire up the PUB/SUB socket pair."""
        self.context = Context()

        # Outgoing messages leave via the PUB socket; incoming ones
        # arrive on the SUB socket.
        self.sub_socket = self.context.socket(socket_type=zmq.SUB)
        self.pub_socket = self.context.socket(socket_type=zmq.PUB)
        self.pub_socket.connect(self.publisher_url)

        self.sub_socket.bind(self.subscriber_url)

        # Apply every configured subscription filter.
        for topic_filter in self.filters:
            self.sub_socket.subscribe(topic_filter)
Ejemplo n.º 11
0
    def establish_socket(self):
        """Bind the REP socket, register it with a poller, and authenticate."""
        self.context = Context()
        self.socket = self.context.socket(REP)
        self.socket.bind(self.address)

        self.poller = Poller()
        self.poller.register(self.socket, POLLIN)

        self.ping = 0
        # Retry once per second for as long as auth() explicitly
        # returns False (any other value ends the loop).
        authenticated = self.auth()
        while authenticated is False:
            time.sleep(1)
            authenticated = self.auth()
Ejemplo n.º 12
0
    def __init__(self, stop_on_loop):
        """Create the REP (receiver) and PUB (broadcast) sockets.

        :param stop_on_loop: loop count at which to stop (integration tests).
        """
        self.stop_on_loop = stop_on_loop  # for integration tests
        self.messages_queue = asyncio.Queue()
        self.context = Context()
        self.sleep_sec = 1  # pause between loop iterations
        self.loops = 0      # iteration counter

        # Receives requests from clients on localhost.
        self.receiver_socket = self.context.socket(zmq.REP)
        self.receiver_socket.bind("tcp://127.0.0.1:8000")

        # Broadcasts to any subscriber on all interfaces.
        self.pub_socket = self.context.socket(zmq.PUB)
        self.pub_socket.bind("tcp://*:8001")
Ejemplo n.º 13
0
def run_queue():
    """LRU-queue broker: routes client requests to the least-recently-used worker.

    A ROUTER socket faces clients (port 5555) and another faces workers
    (port 5556).  Worker addresses are queued as they become ready; the
    frontend is only polled while at least one worker is available.
    """
    context = Context(1)

    frontend = context.socket(zmq.ROUTER)    # ROUTER
    backend = context.socket(zmq.ROUTER)     # ROUTER
    frontend.bind("tcp://*:5555")            # For clients
    backend.bind("tcp://*:5556")             # For workers

    poll_workers = Poller()
    poll_workers.register(backend, zmq.POLLIN)

    poll_both = Poller()
    poll_both.register(frontend, zmq.POLLIN)
    poll_both.register(backend, zmq.POLLIN)

    workers = []  # queue of ready worker addresses (LRU order)

    while True:
        # Only accept client traffic when a worker is ready to take it.
        if workers:
            socks = yield from poll_both.poll()
        else:
            socks = yield from poll_workers.poll()
        socks = dict(socks)

        # Handle worker activity on backend
        if socks.get(backend) == zmq.POLLIN:
            # Use worker address for LRU routing
            msg = yield from backend.recv_multipart()
            if not msg:
                break
            print('I: received msg: {}'.format(msg))
            address = msg[0]
            workers.append(address)

            # Everything after the second (delimiter) frame is reply
            reply = msg[2:]

            # Forward message to client if it's not a READY
            if reply[0] != LRU_READY:
                print('I: sending -- reply: {}'.format(reply))
                yield from frontend.send_multipart(reply)
            else:
                print('I: received ready -- address: {}'.format(address))

        if socks.get(frontend) == zmq.POLLIN:
            # Get client request, route to first available worker
            msg = yield from frontend.recv_multipart()
            worker = workers.pop(0)
            request = [worker, b''] + msg
            print('I: sending -- worker: {}  msg: {}'.format(worker, msg))
            yield from backend.send_multipart(request)
Ejemplo n.º 14
0
    def reconnect(self):
        '''connects or reconnects to the broker

        Closes any previously open sockets, then creates a fresh DEALER
        (broker traffic) and SUB (broadcasts) pair and registers both
        with a new poller.
        '''
        if self.poller:
            self.poller.unregister(self.socket)
        if self.socket:
            self.socket.close()
        # Bug fix: the old SUB socket was leaked on every reconnect;
        # close it too if it exists.
        if getattr(self, 'subsocket', None):
            self.subsocket.close()
        # NOTE(review): the previous Context is never terminated here,
        # so each reconnect leaks a context — confirm whether term() is
        # safe for this application before changing.
        self.context = Context()
        self.socket = self.context.socket(zmq.DEALER)
        self.socket.connect(self.QWeatherStationIP + ':' + self.QWeatherStationSocket)
        self.subsocket = self.context.socket(zmq.SUB)
        self.subsocket.connect(self.QWeatherStationIP + ':' + str(int(self.QWeatherStationSocket) + SUBSOCKET))

        self.poller = Poller()
        self.poller.register(self.socket, zmq.POLLIN)
        self.poller.register(self.subsocket, zmq.POLLIN)
Ejemplo n.º 15
0
async def send_batch_worker(buffer, exe, event, lock, batch_size, beta, actor_num, actor_ips):
    """
    coroutine to send training batches to learner

    Listens on a DEALER socket for batch requests from the learner,
    samples a batch from *buffer* (holding *lock*) in the executor, and
    sends it back.  One additional DEALER socket per actor is opened
    for use by sample_batch.
    """
    seed = int(str(time.time())[-4:])
    utils.set_global_seeds(seed, use_torch=False)
    loop = asyncio.get_event_loop()
    ctx = Context.instance()
    socket = ctx.socket(zmq.DEALER)
    socket.connect("ipc:///tmp/5103.ipc")

    actors_sockets = []
    for i in range(actor_num):
        # Bug fix: this loop previously reassigned ``ctx`` and
        # ``socket``, clobbering the learner socket above, so the main
        # loop below ended up talking to the last actor socket instead
        # of the learner.
        actor_ctx = zmq.Context()
        actor_socket = actor_ctx.socket(zmq.DEALER)
        actor_socket.connect('tcp://{}:51004'.format(actor_ips[i]))
        actors_sockets.append(actor_socket)

    await event.wait()
    while True:
        identity, _ = await socket.recv_multipart(copy=False)
        # TODO: Is there any other great way to support lock but make sampling faster?
        async with lock:
            batch = await loop.run_in_executor(exe, sample_batch, buffer, batch_size, beta, actors_sockets)
        await socket.send_multipart([identity, batch], copy=False)
        batch = None  # release the reference promptly; batches can be large
    return True
Ejemplo n.º 16
0
async def zmq_iota_recv():
    """Subscribe to IOTA tx_trytes events and fan them out to websocket clients.

    Each ZMQ message is ``b'tx_trytes <trytes> <hash>'``; the transaction
    address is looked up in connection_addresses and the payload is sent
    as JSON to every subscribed connection.
    """
    ctx = Context.instance()
    print("Connecting to ZMQ...")
    s = ctx.socket(zmq.SUB)
    s.connect('tcp://%s:%s' % (IOTA_HOST, IOTA_ZMQ_PORT))
    print("Subscribing to tx_trytes...")
    s.subscribe(b"tx_trytes")
    while True:
        msg = await s.recv()
        #print('received', msg)
        # Message layout: topic, tryte payload, transaction hash.
        topic, data, hash_ = msg.split(b' ')
        str_data = data.decode('ascii')
        str_hash = hash_.decode('ascii')

        tx = Transaction.from_tryte_string(data, hash_)

        print(str(tx.address), connection_addresses.get(str(tx.address), []),
              repr(connection_addresses))
        # One send per subscription registered for this address.
        tasks = [
            send_json(
                subscription.connection, {
                    'id': subscription.id,
                    'type': 'transaction',
                    'data': str_data,
                    'hash': str_hash
                })
            for subscription in connection_addresses.get(str(tx.address), [])
        ]
        if tasks:
            await asyncio.wait(tasks)
    # NOTE(review): unreachable — the while True above never breaks.
    s.close()
Ejemplo n.º 17
0
class Client:
    """JSON client for the local ZMQ server: REQ for calls, SUB for events."""

    def __init__(self, name):
        self.name = name
        self.logger = getLogger(__name__)
        self.context = Context()
        self.req_sock = self.context.socket(REQ)
        self.sub_sock = self.context.socket(SUB)
        self.connect_req_sock()
        self.uuid = str(uuid4())

    def connect_req_sock(self):
        """Connect the request socket to the local server endpoint."""
        self.logger.debug('client start to connect ...')
        self.req_sock.connect("tcp://127.0.0.1:40085")
        self.logger.debug('client connected')

    async def register(self, active_count=1):
        """Register this client with the server; raise on an error reply."""
        msg = {
            'action': 'register',
            'name': self.name,
            'uuid': self.uuid,
            'active_count': active_count,
        }
        await self.req_sock.send_json(msg)
        resp = await self.req_sock.recv_json()
        if not resp or resp.get('error') != 0:
            raise RuntimeError(str(resp))

    async def keepalive(self):
        """Send a keepalive; raise if the server reports an error."""
        msg = {'action': 'keepalive', 'name': self.name, 'uuid': self.uuid}
        await self.req_sock.send_json(msg)
        self.logger.debug('waiting keepalive resp ...')
        resp = await self.req_sock.recv_json()
        self.logger.debug('resp for keepalive: %s', resp)
        if not resp or resp.get('error') != 0:
            raise RuntimeError(str(resp))

    async def unregister(self):
        """Ask the server to drop this client; the reply is discarded."""
        msg = {'action': 'unregister', 'name': self.name, 'uuid': self.uuid}
        await self.req_sock.send_json(msg)
        await self.req_sock.recv_json()

    def subscribe(self, topic=None):
        """Subscribe the SUB socket to *topic* (defaults to the client name)."""
        topic = topic or self.name
        self.sub_sock.connect("tcp://127.0.0.1:40086")
        self.sub_sock.setsockopt_string(SUBSCRIBE, topic)
        self.logger.info('subscribe with topic: %s', topic)

    async def fetch_event(self):
        """Await one published event frame pair and return the decoded dict."""
        _, msg = await self.sub_sock.recv_multipart()
        self.logger.info('event: %s', msg)
        event = json.loads(msg.decode('utf-8'))
        self.logger.info('event: %s', event)
        return event
Ejemplo n.º 18
0
async def events_publish(cn, payload):
    '''
    publish a lot of events

    @cn      number of events to publish
    @payload event payload in bytes

    Sends a ``perf start`` marker, then *cn* payload events, then a
    ``perf stop`` marker, all on the module-level topic.
    '''

    ctx = Context.instance()
    pub = ctx.socket(zmq.PUB)
    pub.connect(pub_url)
    print(f'publisher url {pub_url}, counter {cn}')
    #mandatory delay so subscribers have time to connect before we publish
    time.sleep(1)

    print('start perfomance measureing...')
    evnt = [topic, b'perf start']
    await pub.send_multipart(evnt)
    while cn:
        cn = cn - 1
        evnt = [topic, payload]
        await pub.send_multipart(evnt)

    evnt = [topic, b'perf stop']
    await pub.send_multipart(evnt)
    print('stop perfomance measureing...')
Ejemplo n.º 19
0
    async def __zmqSimpleClient(self, zmqConf):
        """Send one ``resourcesInfo`` request to the ZMQ interface and log the reply.

        :param zmqConf: configuration dict that must contain the
            interface IP address and port.
        """
        self.assertTrue(ZMQInterface.CONF_IP_ADDRESS in zmqConf)
        self.assertTrue(ZMQInterface.CONF_PORT in zmqConf)

        # Give the service a moment to start listening.
        await asyncio.sleep(1)

        socket = None
        try:
            zmqCtx = Context.instance()

            address = 'tcp://%s:%s' % (str(
                zmqConf[ZMQInterface.CONF_IP_ADDRESS]),
                                       str(zmqConf[ZMQInterface.CONF_PORT]))

            socket = zmqCtx.socket(zmq.REQ)
            socket.connect(address)

            await socket.send(
                str.encode(json.dumps({"request": "resourcesInfo"})))

            logging.info("request sent - waiting for response")

            resp = await socket.recv()

            logging.info("received response: %s" % bytes.decode(resp))
        except Exception:
            # Bug fix: was a bare ``except:``, which also swallowed
            # SystemExit / KeyboardInterrupt.
            logging.exception("Client failed")

        # Bug fix: if socket creation failed, ``socket`` was unbound and
        # close() raised NameError on top of the original failure.
        if socket is not None:
            socket.close()

        logging.info("client finishing")
Ejemplo n.º 20
0
 def __init__(self):
     """Bind a ROUTER socket on port 5555 and start the periodic timer."""
     self.stop = False
     self.conx = Context.instance()
     self.socket = self.conx.socket(zmq.ROUTER)
     self.socket.bind('tcp://*:5555')
     # Invoke self.timer every 4000 ms.
     self.periodic = PeriodicCallback(self.timer, 4000)
     self.periodic.start()
Ejemplo n.º 21
0
def run(loop):
    """Create a Server on a fresh Context and run its tasks to completion."""
    printdbg('(run) starting')
    ctx = Context()
    srv = Server(loop, ctx)
    loop.run_until_complete(asyncio.wait(srv.run_server()))
    printdbg('(run) finished')
Ejemplo n.º 22
0
 def __init__(self, address):
     """Prepare the receiver for *address*; sockets are created by open()."""
     self.pull = self.poller = None
     self.monitor_poller = self.monitor_socket = None
     self.address = address
     self.context = Context.instance()
     self.open()
     self.first_missing = None  # no gap in the sequence seen yet
Ejemplo n.º 23
0
class async_zmq_streaming_subscriber(object):
    """Async ZMQ subscriber that binds a SUB socket and prints every message."""

    def __init__(self, zmp_subscribing_port: int):
        """Bind a keepalive-enabled SUB socket on *zmp_subscribing_port*."""
        super(async_zmq_streaming_subscriber, self).__init__()
        self._port = zmp_subscribing_port
        self.zmq_context = Context()
        self.zmq_bingding_socket = self.zmq_context.socket(SUB)

        self.zmq_bingding_socket.setsockopt(TCP_KEEPALIVE, 1)  # enable TCP keepalive
        self.zmq_bingding_socket.setsockopt(TCP_KEEPALIVE_CNT,
                                            5)  # reconnect after 5 unanswered keepalive probes
        self.zmq_bingding_socket.setsockopt(TCP_KEEPALIVE_IDLE,
                                            60)  # start probing after 60s of idle connection
        self.zmq_bingding_socket.setsockopt(TCP_KEEPALIVE_INTVL, 3)  # 3s between keepalive probes
        self.zmq_bingding_socket.setsockopt_string(SUBSCRIBE, "")  # subscribe to all topics (required)

        zmq_sub_address = f"tcp://*:{zmp_subscribing_port}"
        self.zmq_bingding_socket.bind(zmq_sub_address)
        pass

    async def loop_runner(self):
        """Receive and print subscribed messages forever."""

        print(f"zmq端口{self._port}开始sub监听")

        while True:
            msg = await self.zmq_bingding_socket.recv()
            print(msg)  # print each received message

    def run(self):
        """Run the receive loop (blocking)."""
        run(self.loop_runner())
Ejemplo n.º 24
0
    def setup(self, conf):
        """Open ZMQ interface.

        Binds a REP socket at the configured address.  If port number is
        not specified in QCG-PilotJob configuration, it is chosen
        randomly from the configured range.  Computes ``real_address``
        (what the socket reports) and ``external_address`` (reachable by
        other services).
        """
        #        zmq.asyncio.install()
        self.zmq_ctx = Context.instance()

        self.address = Config.ZMQ_IFACE_ADDRESS.get(conf)

        self.socket = self.zmq_ctx.socket(zmq.REP)  #pylint: disable=maybe-no-member

        # Fixed port configured -> bind directly; otherwise pick a free
        # port from the configured min/max range.
        if Config.ZMQ_PORT.get(conf):
            self.socket.bind(self.address)
        else:
            self.local_port = self.socket.bind_to_random_port(
                self.address,
                min_port=int(Config.ZMQ_PORT_MIN_RANGE.get(conf)),
                max_port=int(Config.ZMQ_PORT_MAX_RANGE.get(conf)))

        # Ask the socket for the endpoint it actually bound to.
        self.real_address = str(
            bytes.decode(self.socket.getsockopt(zmq.LAST_ENDPOINT)))  #pylint: disable=maybe-no-member

        # the real address might contain the 0.0.0.0 IP address which means that it listens on all
        # interfaces, sadly this address is not valid for external services to communicate, so we
        # need to replace 0.0.0.0 with the real address IP
        self.external_address = self.real_address
        if '//0.0.0.0:' in self.real_address:
            self.external_address = self.real_address.replace(
                '//0.0.0.0:',
                '//{}:'.format(socket.gethostbyname(socket.gethostname())))

        _logger.info(
            'ZMQ interface configured (address %s) @ %s, external address @ %s',
            self.address, self.real_address, self.external_address)
Ejemplo n.º 25
0
class RPC(WSRPCHandler):
    """Application RPC. RPC methods should start with the `rpc_` prefix"""

    def __init__(self, loop=None):
        if loop is None:
            loop = asyncio.get_event_loop()
        self._loop = loop
        self._context = Context()

    async def rpc_echo(self, ws, method, blob):
        """Echo *blob* straight back over the websocket."""
        ws.send_bytes(blob)

    async def rpc_echo_worker(self, ws, method, blob):
        """Round-trip *blob* through the echo worker and relay each frame."""
        socket = self._context.socket(zmq.DEALER)
        socket.connect('tcp://localhost:5559')
        await socket.send_multipart([b'', blob])

        message = await socket.recv_multipart()
        assert message[-1] == blob, '%s does not equal %s' % (
            message[-1], blob)
        ws.send_bytes(message[-1])

        # Echo worker streams `closing` after echoing
        message = await socket.recv_multipart()
        # Bug fix: the failure message referenced an undefined
        # ``message1``, raising NameError instead of AssertionError.
        assert message[-1] == b'closing', '%s does not equal %s' % (
            message[-1], 'closing')
        ws.send_bytes(message[-1])
Ejemplo n.º 26
0
    def __init__(self):
        """Create magnet PVs for all corrector/quad/bend devices and configure the model."""
        super().__init__()
        self.ctx = Context.instance()
        #cmd socket is a synchronous socket, we don't want the asyncio context.
        self.cmd_socket = zmq.Context().socket(zmq.REQ)
        self.cmd_socket.connect("tcp://127.0.0.1:{}".format(
            os.environ.get('MODEL_PORT', 12312)))
        init_vals = self.get_initial_values()
        # One MagnetPV per XCOR/YCOR/QUAD/BEND device, seeded with the
        # initial values fetched from the model.
        mag_pvs = {
            device_name:
            MagnetPV(device_name,
                     simulacrum.util.convert_device_to_element(device_name),
                     self.on_magnet_change,
                     length=init_vals[device_name]['length'],
                     initial_value=init_vals[device_name],
                     prefix=device_name)
            for device_name in simulacrum.util.device_names
            if device_name.startswith("XCOR") or device_name.startswith("YCOR")
            or device_name.startswith("QUAD") or device_name.startswith("BEND")
        }
        self.add_pvs(mag_pvs)
        # Now that we've set up all the magnets, we need to send the model a
        # command to use non-normalized magnetic field units.
        self.cmd_socket.send_pyobj({
            "cmd":
            "tao",
            "val":
            "set ele Kicker::*,Quadrupole::* field_master = T"
        })
        self.cmd_socket.recv_pyobj()

        L.info("Initialization complete.")
Ejemplo n.º 27
0
async def download_file(context: Context, dirserv_sock: Socket,
                        filename: str) -> bytes:
    """Download *filename* from one of the devices that declared it.

    Queries the directory service for devices declaring the file, picks
    one declared address at random, and requests the file contents over
    a REQ socket (5 second timeout).

    NOTE(review): returns None (not bytes) when the remote reports an
    error status — the ``-> bytes`` annotation understates this.
    """
    devices = await get_file_declared_devices(dirserv_sock, filename)
    all_declared_addresses = []
    for dev_name in devices:
        addresses = await get_devices_declared_addresses(
            dirserv_sock, dev_name)
        all_declared_addresses += addresses
    used_address = choice(
        all_declared_addresses
    )  # we use only one connection to 'download' files here,
    # but a complete implementation must download them from different devices to speed up the process
    print("download_file(): using address {}".format(used_address))
    download_sock: Socket = context.socket(zmq.REQ)
    download_sock.connect(used_address)
    await download_sock.send_multipart(
        [b"fs.read_file", bytes(filename, 'utf8')])
    frames: List[bytes] = await asyncio.wait_for(
        download_sock.recv_multipart(), 5)
    # This is just a sample protocol, and it does not need complex functions to deal with big contents
    download_sock.close()
    # First byte of the first frame is the status: 0 means success.
    if frames[0][0] == 0:
        return frames.pop(1)
    else:
        return None
Ejemplo n.º 28
0
    def __init__(self, sub_topic_list: list, pub_topic_list: list):
        """Create PUB/SUB sockets for the broker and subscribe to all topics.

        :param sub_topic_list: topics to subscribe to (``b'disconnect'``
            is always added).
        :param pub_topic_list: topics this client publishes on
            (``b'disconnect'`` is always added).
        """
        self.pub_topic_list = pub_topic_list + [b'disconnect']
        self.messages_to_send = asyncio.Queue()
        self.messages_received = asyncio.Queue()

        self.client_log = log_sys.get_logger('client')
        self.client_log.set_level(INFO)

        # Local publish endpoint (port appended later) and fixed broker
        # listen/publish endpoints.
        self.pub_url = 'tcp://127.0.0.1:'
        self.broker_listen_url = 'tcp://127.0.0.1:9500'
        self.broker_pub_url = 'tcp://127.0.0.1:9501'

        self.ctx = Context.instance()

        self.pub_sock = self.ctx.socket(zmq.PUB)
        self.sub_sock = self.ctx.socket(zmq.SUB)

        self.sub_sock.connect(self.broker_pub_url)

        sub_topic_list += [b'disconnect']
        for st in sub_topic_list:
            self.sub_sock.setsockopt(zmq.SUBSCRIBE, st)

        self.loop = None
        self.tasks_h = None
        self.is_connected = False
        # Coroutines started when the client runs.
        self.client_tasks = [self._subscribe_task(), self._publish_task()]
        self.all_tasks = []
Ejemplo n.º 29
0
 def __init__(self):
     """Create cavity PVs (per device and per linac section) from model values."""
     super().__init__()
     self.ctx = Context.instance()
     #cmd socket is a synchronous socket, we don't want the asyncio context.
     self.cmd_socket = zmq.Context().socket(zmq.REQ)
     self.cmd_socket.connect("tcp://127.0.0.1:{}".format(
         os.environ.get('MODEL_PORT', 12312)))
     init_vals = self.get_cavity_ACTs_from_model()
     # One CavityPV per cavity device reported by the model.
     cav_pvs = {
         device_name: CavityPV(device_name,
                               self.on_cavity_change,
                               initial_values=init_vals[device_name],
                               prefix=device_name)
         for device_name in init_vals.keys()
     }
     #setting up convenient linac section PVs for changing all of the L1B/L2B/L3B cavities simultaneously.
     linac_init_vals = _make_linac_table(init_vals)
     linac_pvs = {
         device_name: CavityPV(device_name,
                               self.on_cavity_change,
                               initial_values=linac_init_vals[device_name],
                               prefix=device_name)
         for device_name in linac_init_vals.keys()
     }
     self.add_pvs(cav_pvs)
     self.add_pvs(linac_pvs)
     L.info("Initialization complete.")
Ejemplo n.º 30
0
def run(loop, ident, num_workers):
    """Spawn *num_workers* workers sharing one Context and wait for them all."""
    ctx = Context()
    workers = [
        asyncio.ensure_future(run_worker(ctx, '%s-%d' % (ident, idx)))
        for idx in range(num_workers)
    ]
    loop.run_until_complete(asyncio.wait(workers))
Ejemplo n.º 31
0
async def recv_batch_worker(buffer, exe, event, lock, threshold_size):
    """
    coroutine to receive batch from actors

    Receives batches on a DEALER socket, pushes them into *buffer*
    (holding *lock*) via the executor, and acknowledges each with an
    empty frame.  Sets *event* once the buffer first reaches
    *threshold_size* so training can start.
    """
    loop = asyncio.get_event_loop()
    ctx = Context.instance()
    socket = ctx.socket(zmq.DEALER)
    socket.connect("ipc:///tmp/5101.ipc")

    start = False   # whether the start event has been fired yet
    cnt = 0         # number of batches received
    ts = time.time()

    while True:
        identity, data = await socket.recv_multipart(copy=False)
        async with lock:
            await loop.run_in_executor(exe, push_batch, buffer, data)
        await socket.send_multipart((identity, b''))
        # TODO: 1. Only one worker should print log to console.
        #       2. Hard-coded part in (50 * cnt * 4) should be fixed.
        data = None
        cnt += 1
        if cnt % 100 == 0:
            print("Buffer Size: {} / FPS: {:.2f}".format(
                len(buffer), (50 * cnt * 4) / (time.time() - ts)
            ))
            ts = time.time()
            # Fire the start signal once the buffer is warm enough.
            if not start and len(buffer) >= threshold_size:
                start = True
                event.set()
    return True
Ejemplo n.º 32
0
async def eddn(bot):
    """Consume the EDDN relay feed and forward BGS-relevant events.

    Subscribes to the EDDN ZMQ relay, filters messages mentioning the
    tracked factions, and submits Location/FSDJump events to the BGS
    cog.  Reconnects after a short pause on socket errors or unexpected
    exceptions.
    """
    # Local import: this coroutine must not block the event loop with
    # time.sleep(), so asyncio.sleep() is used for reconnect delays.
    import asyncio

    context = Context.instance()
    subscriber = context.socket(zmq.SUB)

    subscriber.subscribe(b"")
    subscriber.set(zmq.RCVTIMEO, __timeoutEDDN)

    allowed_events = ['Location', 'FSDJump']

    # Lazily load the BGS cog if it is not present yet.
    bgs = bot.get_cog('BGS')
    if bgs is None:
        bot.load_extension('cogs.bgs')
        bgs = bot.get_cog('BGS')

    while bot.bgs_run:
        try:
            subscriber.connect(__relayEDDN)

            while bot.bgs_run:
                __message = await subscriber.recv()

                if not __message:
                    subscriber.disconnect(__relayEDDN)
                    break

                __message = zlib.decompress(__message)
                # Only consider messages that mention a tracked faction.
                if ("prismatic imperium" in str(__message).lower()
                        or "adamantine union" in str(__message).lower()
                        or "colonists of aurora" in str(__message).lower()):

                    __json = json.loads(__message, object_hook=lambda d: SimpleNamespace(**d))
                    message = __json.message
                    if message.event in allowed_events:
                        await bgs.submit(message)

        except zmq.ZMQError as e:
            print('ZMQSocketException: ' + str(e))
            sys.stdout.flush()
            subscriber.disconnect(__relayEDDN)
            # Bug fix: time.sleep() blocked the whole event loop while
            # waiting to reconnect; yield to the loop instead.
            await asyncio.sleep(5)

        except Exception as error:
            # Report the failure to the configured error channels.
            embed = discord.Embed(title='Command Exception', color=discord.Color.red())
            embed.set_footer(text='Occured on')
            embed.timestamp = datetime.datetime.utcnow()

            exc = ''.join(traceback.format_exception(type(error), error, error.__traceback__, chain=False))
            exc = exc.replace('`', '\u200b`')
            embed.description = '```py\n{}\n```'.format(exc)

            embed.add_field(name='EDDN error', value="EDDN encountered an error")

            try:
                for channel in config.ERROR_CHANNELS:
                    await bot.get_channel(channel).send(type(error), embed=embed)
            except Exception as error:
                print(error)

            subscriber.disconnect(__relayEDDN)
            # Bug fix: same blocking-sleep issue as above.
            await asyncio.sleep(5)
Ejemplo n.º 33
0
def step1(loop, context):
    """Step 1: signal step 2 over the inproc PAIR socket."""
    context = context or Context.instance()
    sender = context.socket(zmq.PAIR)
    sender.connect("inproc://step2")
    payload = b'message from step1'
    yield from sender.send(payload)
    print('step1 -- sent msg: {}'.format(payload))
Ejemplo n.º 34
0
def run(loop):
    """Bind the ROUTER socket, launch workers plus the requestor, and wait."""
    context = Context.instance()
    router = context.socket(zmq.ROUTER)
    router.bind("tcp://*:5671")
    tasks = [
        asyncio.ensure_future(worker_task(idx)) for idx in range(NBR_WORKERS)
    ]
    tasks.append(asyncio.ensure_future(requestor(router)))
    loop.run_until_complete(asyncio.wait(tasks))
    for t in tasks:
        print('result: {}'.format(t.result()))
Ejemplo n.º 35
0
    def __init__(self):
        """Subscribe to build_queue messages and prepare the work queue."""
        self.ctx = Context.instance()
        target_set = {'build_queue'}

        self.socket = self.ctx.socket(zmq.SUB)
        self.socket.connect(OUT)
        # Subscribe to each target topic (UTF-8 encoded prefix).
        for target in target_set:
            self.socket.setsockopt(zmq.SUBSCRIBE, target.encode('utf-8'))

        self.queue = asyncio.Queue()
        self.dont_build = set()  # NOTE(review): presumably targets to skip — confirm
Ejemplo n.º 36
0
def run(loop):
    """Start worker_a/worker_b plus the dealer, then wait for all of them."""
    ctx = Context.instance()
    client = ctx.socket(zmq.ROUTER)
    client.bind(CONNECTION_ADDRESS)
    futures = [
        asyncio.ensure_future(coro)
        for coro in (worker_a(ctx), worker_b(ctx), dealer(client))
    ]
    loop.run_until_complete(asyncio.wait(futures))
    for fut in futures:
        print("result: {}".format(fut.result()))
Ejemplo n.º 37
0
def run_server():
    """Echo server for the Lazy Pirate pattern with simulated failures.

    Echoes each request on a REP socket; after a few cycles it randomly
    simulates a crash (unbind + break) or CPU overload (long sleep) so
    client retry logic can be exercised.  Returns (context, server) so
    the caller can clean up.
    """
    context = Context()
    server = context.socket(zmq.REP)
    server.bind(SERVER_ADDR)
    cycles = 0
    while True:
        request = yield from server.recv()
        cycles += 1
        # Simulate various problems, after a few cycles
        if cycles > 3 and randint(0, 3) == 0:
            print("I: Simulating a crash")
            server.unbind(SERVER_ADDR)
            # Delay for a bit, else we get "Address already in use" error.
            # Note that to really simulate a crash, we should probably kill
            # this process and start another.
            yield from asyncio.sleep(2)
            break
        elif cycles > 3 and randint(0, 3) == 0:
            print("I: Simulating CPU overload")
            yield from asyncio.sleep(2)
        print("I: Normal request (%s)" % request)
        yield from asyncio.sleep(1)       # Do some heavy work
        yield from server.send(request)
    return (context, server)
Ejemplo n.º 38
0
def step2(loop, context):
    """Step 2: wait for the step-1 signal on an inproc PAIR socket,
    then forward a signal downstream to step 3."""
    if not context:
        context = Context.instance()
    # Bind to the inproc: endpoint, then start the upstream thread.
    upstream = context.socket(zmq.PAIR)
    upstream.bind("inproc://step2")
    # Block until the upstream side signals us.
    printdbg('(step2) waiting for signal')
    msg = yield from upstream.recv()
    print('step2 -- received msg: {}'.format(msg))
    # Signal downstream to step 3.
    downstream = context.socket(zmq.PAIR)
    downstream.connect("inproc://step3")
    msg = b'message from step2'
    yield from downstream.send(msg)
    print('step2 -- sent msg: {}'.format(msg))
Ejemplo n.º 39
0
    def __init__(self, url):
        """Set up a DEALER socket with a random identity plus the router,
        receiver, and sender helpers that share it."""
        self._url = url

        self._ctx = Context.instance()
        self._socket = self._ctx.socket(zmq.DEALER)
        # Random 16-hex-char identity so the remote ROUTER can address us.
        self._socket.identity = uuid.uuid4().hex.encode()[:16]

        router = _MessageRouter()
        self._msg_router = router
        self._receiver = _Receiver(self._socket, router)
        self._sender = _Sender(self._socket, router)

        self._recv_task = None

        # Monitoring properties (populated lazily when monitoring starts).
        self._monitor_sock = None
        self._monitor_fd = None
        self._monitor_task = None
    def __init__(self, url='127.0.0.1', port='5555'):
        """Report zmq versions, then run every pub/sub and dealer/router
        coroutine of this demo to completion on the current event loop."""
        # Show which libzmq / pyzmq versions are in play.
        print("Current libzmq version is %s" % zmq.zmq_version())
        print("Current  pyzmq version is %s" % zmq.__version__)

        self.url = "tcp://{}:{}".format(url, port)
        # One shared context for both pub/sub and dealer/router pairs.
        self.ctx = Context.instance()

        # init hello world publisher obj
        self.hello_world = HelloWorld()

        # Activate all publishers / subscribers concurrently.
        coros = [
            self.hello_world_pub(),
            self.hello_world_sub(),
            self.lang_changer_router(),  # less restrictions than REP
            self.lang_changer_dealer(),  # less restrictions than REQ
        ]
        asyncio.get_event_loop().run_until_complete(asyncio.wait(coros))
Ejemplo n.º 41
0
def worker_task(id, context=None):
    """REQ worker: announce readiness and count workloads until b"END".

    Returns ('worker <id>', total) with the number of tasks processed.
    """
    context = context or Context.instance()
    worker = context.socket(zmq.REQ)
    # We use a string identity for ease here.
    zhelpers.set_id(worker)
    worker.connect("tcp://localhost:5671")
    total = 0
    while True:
        # Tell the router we're ready for work.
        yield from worker.send(b"ready")
        # Get a workload from the router; b"END" means we're done.
        workload = yield from worker.recv()
        if workload == b"END":
            print("worker %d processed: %d tasks" % (id, total))
            break
        total += 1
        # Do some random work.
        yield from asyncio.sleep(0.1 * random.random())
    return ('worker {}'.format(id), total)
Ejemplo n.º 42
0
def run():
    ''' Run Ironhouse example: a CURVE-encrypted PUSH/PULL pair with an
    asyncio CURVE authenticator restricted to localhost.

    Requires the certificate directories produced by the
    generate_certificates script; exits with status 1 if they are missing.
    Written in the pre-async/await generator-coroutine style (yield from).
    '''

    # These directories are generated by the generate_certificates script
    base_dir = os.path.dirname(__file__)
    keys_dir = os.path.join(base_dir, 'certificates')
    public_keys_dir = os.path.join(base_dir, 'public_keys')
    secret_keys_dir = os.path.join(base_dir, 'private_keys')

    if not (os.path.exists(keys_dir) and
            os.path.exists(public_keys_dir) and
            os.path.exists(secret_keys_dir)):
        logging.critical("Certificates are missing - run generate_certificates.py script first")
        sys.exit(1)

    ctx = Context.instance()

    # Start an authenticator for this context.
    auth = AsyncioAuthenticator(ctx)
    auth.start()
    auth.allow('127.0.0.1')
    # Tell authenticator to use the certificate in a directory
    auth.configure_curve(domain='*', location=public_keys_dir)

    server = ctx.socket(zmq.PUSH)

    # The server needs its own full keypair to act as a CURVE server.
    server_secret_file = os.path.join(secret_keys_dir, "server.key_secret")
    server_public, server_secret = zmq.auth.load_certificate(server_secret_file)
    server.curve_secretkey = server_secret
    server.curve_publickey = server_public
    server.curve_server = True  # must come before bind
    server.bind('tcp://*:9000')

    client = ctx.socket(zmq.PULL)

    # We need two certificates, one for the client and one for
    # the server. The client must know the server's public key
    # to make a CURVE connection.
    client_secret_file = os.path.join(secret_keys_dir, "client.key_secret")
    client_public, client_secret = zmq.auth.load_certificate(client_secret_file)
    client.curve_secretkey = client_secret
    client.curve_publickey = client_public

    server_public_file = os.path.join(public_keys_dir, "server.key")
    server_public, _ = zmq.auth.load_certificate(server_public_file)
    # The client must know the server's public key to make a CURVE connection.
    client.curve_serverkey = server_public
    client.connect('tcp://127.0.0.1:9000')

    # Round-trip one message through the encrypted pair to verify the setup.
    yield from server.send(b"Hello")

    # Poll up to 1s for the message; CURVE handshake failure shows up as a timeout.
    if (yield from client.poll(1000)):
        msg = yield from client.recv()
        if msg == b"Hello":
            logging.info("Ironhouse test OK")
    else:
        logging.error("Ironhouse test FAIL")


    # close sockets
    server.close()
    client.close()
    # stop auth task
    auth.stop()
    # NOTE(review): the context is never terminated here; acceptable for a
    # one-shot example, but a long-lived app should also call ctx.term().
Ejemplo n.º 43
0
"""Example using zmq with asyncio coroutines"""
# Copyright (c) PyZMQ Developers.
# This example is in the public domain (CC-0)

import time

import zmq
from zmq.asyncio import Context, Poller
import asyncio

url = 'tcp://127.0.0.1:5555'

ctx = Context.instance()


async def ping():
    """Print a dot every half second to show the event loop is not blocked."""
    while True:
        await asyncio.sleep(0.5)
        print('.')


async def receiver():
    """receive messages with polling"""
    pull = ctx.socket(zmq.PULL)
    pull.connect(url)
    poller = Poller()
    poller.register(pull, zmq.POLLIN)
    while True:
        events = await poller.poll()
        if pull in dict(events):
Ejemplo n.º 44
0
def run_broker(loop):
    """ main broker method: LRU (least-recently-used) queue broker.

    Spawns NBR_WORKERS worker and NBR_CLIENTS client coroutines over two
    inproc ROUTER sockets, then shuttles requests to the least recently
    ready worker and replies back to clients until NBR_CLIENTS * 3
    replies have been delivered.  Cancels the workers, closes the
    sockets, stops *loop*, and returns 'finished ok'.

    Worker frame protocol on the backend:
        [worker_addr, b"", b"READY"]                      -- worker is ready
        [worker_addr, b"", client_addr, b"", reply]       -- reply for a client
    Client frame protocol on the frontend:
        [client_addr, b"", request]
    """
    print('(run_broker) starting')
    url_worker = "inproc://workers"
    url_client = "inproc://clients"
    # Total number of replies to deliver before shutting down.
    client_nbr = NBR_CLIENTS * 3
    # Prepare our context and sockets
    context = Context()
    frontend = context.socket(zmq.ROUTER)
    frontend.bind(url_client)
    backend = context.socket(zmq.ROUTER)
    backend.bind(url_worker)
    print('(run_broker) creating workers and clients')
    # create workers and clients threads
    worker_tasks = []
    for idx in range(NBR_WORKERS):
        task = asyncio.ensure_future(run_worker(url_worker, context, idx))
        worker_tasks.append(task)
    client_tasks = []
    for idx in range(NBR_CLIENTS):
        task = asyncio.ensure_future(run_client(url_client, context, idx))
        client_tasks.append(task)
    print('(run_broker) after creating workers and clients')
    # Logic of LRU loop
    # - Poll backend always, frontend only if 1+ worker ready
    # - If worker replies, queue worker as ready and forward reply
    # to client if necessary
    # - If client requests, pop next worker and send request to it
    # Queue of available workers
    available_workers = 0
    workers_list = []
    # init poller
    poller = Poller()
    # Always poll for worker activity on backend
    poller.register(backend, zmq.POLLIN)
    # Poll front-end only if we have available workers
    # NOTE(review): frontend is actually registered unconditionally here;
    # the availability check is done after poll() instead (see below).
    poller.register(frontend, zmq.POLLIN)
    while True:
        socks = yield from poller.poll()
        socks = dict(socks)
        # Handle worker activity on backend
        if (backend in socks and socks[backend] == zmq.POLLIN):
            # Queue worker address for LRU routing
            message = yield from backend.recv_multipart()
            assert available_workers < NBR_WORKERS
            worker_addr = message[0]
            # add worker back to the list of workers
            available_workers += 1
            workers_list.append(worker_addr)
            #   Second frame is empty
            empty = message[1]
            assert empty == b""
            # Third frame is READY or else a client reply address
            client_addr = message[2]
            # If client reply, send rest back to frontend
            if client_addr != b'READY':
                # Following frame is empty
                empty = message[3]
                assert empty == b""
                reply = message[4]
                yield from frontend.send_multipart([client_addr, b"", reply])
                printdbg('(run_broker) to frontend -- reply: "{}"'.format(
                    reply))
                client_nbr -= 1
                if client_nbr == 0:
                    printdbg('(run_broker) exiting')
                    break   # Exit after N messages
        # poll on frontend only if workers are available
        if available_workers > 0:
            if (frontend in socks and socks[frontend] == zmq.POLLIN):
                # Now get next client request, route to LRU worker
                # Client request is [address][empty][request]
                response = yield from frontend.recv_multipart()
                [client_addr, empty, request] = response
                assert empty == b""
                #  Dequeue and drop the next worker address
                available_workers += -1
                worker_id = workers_list.pop()
                yield from backend.send_multipart(
                    [worker_id, b"", client_addr, b"", request])
                printdbg('(run_broker) to backend -- request: "{}"'.format(
                    request))
    #out of infinite loop: do some housekeeping
    printdbg('(run_broker) finished')
    for worker_task in worker_tasks:
        worker_task.cancel()
    printdbg('(run_broker) workers cancelled')
    # Give cancelled tasks a moment to unwind before closing their sockets.
    yield from asyncio.sleep(1)
    frontend.close()
    backend.close()
    #context.term()     # Caution: calling term() blocks.
    loop.stop()
    printdbg('(run_broker) returning')
    return 'finished ok'
Ejemplo n.º 45
0
async def _report_task_result(context : Context, task_info : TaskInformation):
    """Open a DEALER connection to the result receiver for *task_info*.

    NOTE(review): as excerpted, nothing is sent after connecting — the
    send/close logic appears to have been trimmed; confirm against the
    original module before relying on this function.
    """

    sock = context.socket(zmq.DEALER)
    sock.connect(task_info.result_receiver_address.to_zeromq_addr())
Ejemplo n.º 46
0
 def __init__(self, loop=None):
     """Remember the asyncio loop (default: the current one) and create a zmq Context."""
     self._loop = loop if loop is not None else asyncio.get_event_loop()
     self._context = Context()
Ejemplo n.º 47
0
def _worker_main(id, slave_addr, task):
    """Worker-process entry point: connect a DEALER socket to *slave_addr*,
    announce TaskStart, execute *task* (only SleepTask is supported), and
    report TaskFinish with the serialized result.

    Runs its own ZMQEventLoop, so it is intended to be spawned as a
    separate process.
    """

    print("_worker_main")

    # Imports are local because this function runs in a freshly spawned
    # process with its own interpreter state.
    import zmq
    from zmq.asyncio import Context, ZMQEventLoop
    import asyncio
    from ..common.task import SleepTaskResult
    from .task import SleepTask

    def _resolve_msg(msg):
        # Split an incoming multipart message into (header, body).
        # Expected frames: [header, b"", body].
        print(msg)
        #addr = msg[0]
        #assert msg[1] == b""
        header = msg[0]
        assert msg[1] == b""
        body = msg[2]

        return header, body

    def _dispatch_msg(header, body = b""):
        # Fire-and-forget send of [id, b"", header, b"", body] on the
        # shared DEALER socket (scheduled on the event loop).
        async def _dispatch_msg(msg):
            await socket.send_multipart(msg)

        msg = [id.encode(encoding='utf-8'), b'', header, b'', body]
        asyncio.ensure_future(_dispatch_msg(msg))

    def __dispatch_msg(header, body=b""):
        # NOTE(review): dead code — never called, and send_multipart on an
        # asyncio socket returns a coroutine that is never awaited here,
        # so this would not actually send anything.
        def _dispatch_msg(msg):
            socket.send_multipart(msg)

        msg = [id.encode(encoding='utf-8'), b'', header, b'', body]
        _dispatch_msg(msg)

    def _process_sleep_task(task):
        # Sleep for the task's duration, then report TaskFinish with the result.
        async def __process_sleep_task(task):
            await asyncio.sleep(task.job.seconds)
            task.result = SleepTaskResult("Sleep " + str(task.job.seconds) + "By " + id)
            _dispatch_msg(b"TaskFinish", task.result.to_bytes())

        asyncio.ensure_future(__process_sleep_task(task))

    async def _run_worker():
        # Announce start, kick off the task, then wait for one control message.
        _dispatch_msg(b"TaskStart")
        if isinstance(task, SleepTask):
            _process_sleep_task(task)
        else:
            raise ValueError("Invalid Task Type.")

        while True:
            msg = await socket.recv_multipart()
            header, body = _resolve_msg(msg)
            # some codes will be filled later.
            break

    print("[Worker {0}] I'm created!".format(id))

    # Dedicated zmq-aware event loop for this worker process.
    loop = ZMQEventLoop()
    asyncio.set_event_loop(loop)

    context = Context()
    socket = context.socket(zmq.DEALER)

    socket.connect(slave_addr)

    """
    policy = asyncio.get_event_loop_policy()
    policy.set_event_loop(policy.new_event_loop())
    loop = asyncio.get_event_loop()
    """

    loop.run_until_complete(_run_worker())