Example #1
def run_queue():
    context = Context(1)

    frontend = context.socket(zmq.ROUTER)    # ROUTER
    backend = context.socket(zmq.ROUTER)     # ROUTER
    frontend.bind("tcp://*:5555")            # For clients
    backend.bind("tcp://*:5556")             # For workers

    poll_workers = Poller()
    poll_workers.register(backend, zmq.POLLIN)

    poll_both = Poller()
    poll_both.register(frontend, zmq.POLLIN)
    poll_both.register(backend, zmq.POLLIN)

    workers = []

    while True:
        if workers:
            socks = yield poll_both.poll()
        else:
            socks = yield poll_workers.poll()
        socks = dict(socks)

        # Handle worker activity on backend
        if socks.get(backend) == zmq.POLLIN:
            # Use worker address for LRU routing
            msg = yield backend.recv_multipart()
            if not msg:
                break
            print('I: received msg: {}'.format(msg))
            address = msg[0]
            workers.append(address)

            # Everything after the second (delimiter) frame is reply
            reply = msg[2:]

            # Forward message to client if it's not a READY
            if reply[0] != LRU_READY:
                print('I: sending -- reply: {}'.format(reply))
                yield frontend.send_multipart(reply)
            else:
                print('I: received ready -- address: {}'.format(address))

        if socks.get(frontend) == zmq.POLLIN:
            #  Get client request, route to first available worker
            msg = yield frontend.recv_multipart()
            worker = workers.pop(0)
            request = [worker, b''] + msg
            print('I: sending -- worker: {}  msg: {}'.format(worker, msg))
            yield backend.send_multipart(request)
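The Context and Poller used here behave like the Tornado-aware classes from zmq.eventloop.future, and run_queue reads as a generator-style coroutine, so it presumably carries a @gen.coroutine decorator in its original module. Under those assumptions, a minimal sketch of how the broker could be launched (the LRU_READY value is the conventional one-byte ready signal from the zguide-style examples and is an assumption here):

import zmq
from tornado import ioloop
from zmq.eventloop.future import Context, Poller   # assumed source of Context/Poller

LRU_READY = b"\x01"   # assumed ready signal; Example #3 sends the same byte as a string

if __name__ == "__main__":
    # With @gen.coroutine applied, run_queue() returns a Future that
    # run_sync() drives until the broker loop exits.
    ioloop.IOLoop.current().run_sync(run_queue)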
Example #2
def socket():
    """Context manager to give the bound socket."""
    ctx = Context()
    sock = ctx.socket(zmq.PAIR)
    sock.bind("tcp://*:8889")
    yield sock
    sock.close()
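As written, the generator needs a decorator before it behaves the way its docstring says; either contextlib.contextmanager or, if it is a test helper, pytest.fixture would fit. Assuming the former, and a plain synchronous zmq.Context (with the Tornado future Context, the send/recv calls below would instead be yielded inside a coroutine), usage would look like this:

from contextlib import contextmanager

bound_socket = contextmanager(socket)   # wrap the generator explicitly

with bound_socket() as sock:
    sock.send(b"ping")    # a hypothetical zmq.PAIR peer connected to tcp://<host>:8889 receives this
    print(sock.recv())    # blocks until that peer replies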
Example #3
def run_worker():
    context = Context(1)
    worker = context.socket(zmq.REQ)

    identity = "%04X-%04X" % (randint(0, 0x10000), randint(0, 0x10000))
    worker.setsockopt_string(zmq.IDENTITY, identity)
    worker.connect("tcp://localhost:5556")

    print("I: (%s) worker ready" % identity)
    yield worker.send_string(LRU_READY)

    cycles = 0
    while True:
        msg = yield worker.recv_multipart()
        if not msg:
            break

        cycles += 1
        if cycles > 3 and randint(0, 5) == 0:
            print("I: (%s) simulating a crash" % identity)
            break
        elif cycles > 3 and randint(0, 5) == 0:
            print("I: (%s) simulating CPU overload" % identity)
            yield gen.sleep(3)
        print("I: (%s) normal reply" % identity)
        # Do some heavy work
        yield gen.sleep(1)
        yield worker.send_multipart(msg)
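This worker pairs with the run_queue broker in Example #1 and announces itself with the same LRU_READY signal, sent here as a string. A sketch of starting a few workers on one Tornado IOLoop, assuming run_worker is decorated with @gen.coroutine in its original module:

from tornado import ioloop

LRU_READY = "\x01"   # assumed value, matching the ready byte the broker checks for

loop = ioloop.IOLoop.current()
for _ in range(3):
    loop.spawn_callback(run_worker)   # each worker picks its own random identity
loop.start()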
Example #4
    async def open(self):
        context = Context()
        sock = context.socket(zmq.SUB)
        sock.setsockopt(zmq.CONFLATE, 1)
        sock.connect("tcp://localhost:{}".format(settings.ZMQ_PORT))
        sock.subscribe(b'')

        self.sock = sock
        self._stopped = False
        logger.debug("client connected: %s", self.request.remote_ip)
        tornado.ioloop.IOLoop.current().spawn_callback(self.pipe_message)
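open() leaves the actual forwarding to a pipe_message coroutine that is not part of this snippet. A minimal sketch of what such a relay loop could look like, assuming the handler is a tornado.websocket.WebSocketHandler and the Context above is one of pyzmq's awaitable variants (zmq.asyncio or zmq.eventloop.future):

    async def pipe_message(self):
        # Hypothetical relay loop; the project's real pipe_message is not shown above.
        # Forward each (conflated) ZMQ frame to the WebSocket client until stopped.
        while not self._stopped:
            frame = await self.sock.recv()
            try:
                await self.write_message(frame, binary=True)
            except tornado.websocket.WebSocketClosedError:
                break
        self.sock.close()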
Example #5
class PyTestEngine(object):
    def __init__(self, opts):
        self.opts = opts
        self.id = opts["id"]
        self.role = opts["__role"]
        self.returner_address = opts["pytest-{}".format(self.role)]["returner_address"]

    def start(self):
        log.info("Starting Pytest Event Forwarder Engine(forwarding to %s)", self.returner_address)
        self.io_loop = ioloop.IOLoop()
        self.io_loop.make_current()
        self.io_loop.add_callback(self._start)
        atexit.register(self.stop)
        self.io_loop.start()

    @gen.coroutine
    def _start(self):
        self.context = Context()
        self.push = self.context.socket(zmq.PUSH)
        log.debug("Connecting PUSH socket to %s", self.returner_address)
        self.push.connect(self.returner_address)
        minion_opts = self.opts.copy()
        minion_opts["file_client"] = "local"
        self.event = salt.utils.event.get_event(
            "master", opts=minion_opts, io_loop=self.io_loop, listen=True
        )
        self.event.subscribe("")
        self.event.set_event_handler(self.handle_event)
        event_tag = "salt/master/{}/start".format(self.id)
        log.info("Firing event on engine start. Tag: %s", event_tag)
        load = {"id": self.id, "tag": event_tag, "data": {}}
        self.event.fire_event(load, event_tag)

    def stop(self):
        push = self.push
        context = self.context
        event = self.event
        self.push = self.context = self.event = None
        if event:
            event.unsubscribe("")
            event.destroy()
        if push and context:
            push.close(1000)
            context.term()
            self.io_loop.add_callback(self.io_loop.stop)

    @gen.coroutine
    def handle_event(self, payload):
        tag, data = salt.utils.event.SaltEvent.unpack(payload)
        log.debug("Received Event; TAG: %r DATA: %r", tag, data)
        forward = salt.utils.msgpack.dumps((self.id, tag, data), use_bin_type=True)
        yield self.push.send(forward)
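handle_event pushes msgpack-encoded (id, tag, data) tuples to returner_address, so the other end is a PULL socket that unpacks them. A minimal collector sketch (the address and the use of plain blocking pyzmq are assumptions; the real returner used by the test suite is not shown):

import msgpack
import zmq

def collect_events(returner_address="tcp://127.0.0.1:9999"):   # hypothetical address
    ctx = zmq.Context()
    pull = ctx.socket(zmq.PULL)
    pull.bind(returner_address)
    while True:
        # use_bin_type=True on the sender pairs with raw=False here
        minion_id, tag, data = msgpack.loads(pull.recv(), raw=False)
        print(minion_id, tag, data)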
Example #6
def publisher(port=8135):
    context = Context()
    pub_uuid = str(uuid.uuid4())
    pub = context.socket(zmq.PUB)
    pub.connect("tcp://localhost:%s" % port)
    poller = Poller()
    poller.register(pub, zmq.POLLOUT)
    while True:
        topic = 'heartbeat'
        utc = arrow.utcnow()
        raw = json.dumps({"timestamp":utc.timestamp, "uuid": pub_uuid})
        message = '{0} {1}'.format(topic, raw)
        yield pub.send_string(message)   # message is a str, so use send_string (plain send needs bytes)
        yield gen.sleep(1)
Example #7
def subscriber(port=8135):
    '''
        Bind Subscriber
    '''
    logging.warning("Binding SUB socket on port: {0}".format(port))
    context = Context()
    sub = context.socket(zmq.SUB)
    sub.bind("tcp://*:%s" % port)

    sub.setsockopt_string(zmq.SUBSCRIBE, " ")         # note: matches only messages that start with a space
    sub.setsockopt_string(zmq.SUBSCRIBE, "heartbeat")
    sub.setsockopt_string(zmq.SUBSCRIBE, "asterisk")
    sub.setsockopt_string(zmq.SUBSCRIBE, "logging")
    sub.setsockopt_string(zmq.SUBSCRIBE, "upload")
    sub.setsockopt_string(zmq.SUBSCRIBE, "beam")

    poller = Poller()
    poller.register(sub, zmq.POLLIN)

    http_client = _http_client.AsyncHTTPClient()

    while True:
        events = yield poller.poll(timeout=1000)
        if sub in dict(events):
            # receive raw msg from sub
            msg = yield sub.recv_string()   # recv_string so the str splits below work
            # get topic and message
            topic = msg.split(' ')[0]
            message = ' '.join(msg.split(' ')[1:])
            # This makes more sense directly on the beam, which is why
            # it is being moved there; still, this sub_bind.py helps
            # clear up some ideas directly.
            if topic.startswith('heartbeat'):
                print(topic, message)
            elif topic.startswith('asterisk'):
                print(topic, message)
            elif topic.startswith('logging'):
                print(topic, message)
            elif topic.startswith('upload'):
                print(topic, message)
            elif topic.startswith('beam'):
                print(topic, message)
            else:
                # let it crash
                logging.warning('let it crash')
                print(msg)
        else:
            # logging.warning('nothing received')
            pass
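Examples #6 and #7 form a pair: the subscriber binds the port and the (possibly many, short-lived) publishers connect to it. A sketch of running both coroutines on one Tornado IOLoop, assuming each is decorated with @gen.coroutine in its original module:

from tornado import ioloop

loop = ioloop.IOLoop.current()
loop.spawn_callback(subscriber, 8135)   # bound SUB side, started first
loop.spawn_callback(publisher, 8135)    # connecting PUB side
loop.start()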
Example #8
def run_server():
    context = Context()
    server = context.socket(zmq.REP)
    server.bind(SERVER_ADDR)
    cycles = 0
    while True:
        request = yield server.recv()
        cycles += 1
        # Simulate various problems, after a few cycles
        if cycles > 3 and randint(0, 3) == 0:
            print("I: Simulating a crash")
            server.unbind(SERVER_ADDR)
            # Delay for a bit, else we get "Address already in use" error.
            # Note that to really simulate a crash, we should probably kill
            # this process and start another.
            yield gen.sleep(2)
            break
        elif cycles > 3 and randint(0, 3) == 0:
            print("I: Simulating CPU overload")
            yield gen.sleep(2)
        print("I: Normal request (%s)" % request)
        yield gen.sleep(1)  # Do some heavy work
        yield server.send(request)
    raise gen.Return((context, server))
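This mirrors the zguide's Lazy Pirate server; the matching client sets a receive deadline and retries over a fresh REQ socket when the server goes quiet. A compact synchronous client sketch, with SERVER_ADDR, the timeout and the retry count as assumptions:

import zmq

SERVER_ADDR = "tcp://localhost:5555"   # assumed; must match the server's bind address
REQUEST_TIMEOUT = 2500                 # ms, assumed
REQUEST_RETRIES = 3                    # assumed

def lazy_pirate_request(payload=b"hello"):
    ctx = zmq.Context()
    for _ in range(REQUEST_RETRIES):
        client = ctx.socket(zmq.REQ)
        client.connect(SERVER_ADDR)
        client.send(payload)
        if client.poll(REQUEST_TIMEOUT, zmq.POLLIN):
            reply = client.recv()
            client.close()
            return reply
        # No reply in time: abandon this REQ socket and retry with a fresh one.
        client.setsockopt(zmq.LINGER, 0)
        client.close()
    return None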
Example #10
def run_broker(loop):
    """ main broker method """
    url_worker = "inproc://workers"
    url_client = "inproc://clients"
    client_nbr = NBR_CLIENTS * 3
    # Prepare our context and sockets
    context = Context()
    frontend = context.socket(zmq.ROUTER)
    frontend.bind(url_client)
    backend = context.socket(zmq.ROUTER)
    backend.bind(url_worker)
    # create worker and client tasks
    # worker_tasks = []
    for idx in range(NBR_WORKERS):
        loop.add_callback(partial(run_worker, url_worker, context, idx))
        # worker_tasks.append(task)
    # client_tasks = []
    for idx in range(NBR_CLIENTS):
        loop.add_callback(partial(run_client, url_client, context, idx))
        # client_tasks.append(task)
    # Logic of LRU loop
    # - Poll backend always, frontend only if 1+ worker ready
    # - If worker replies, queue worker as ready and forward reply
    # to client if necessary
    # - If client requests, pop next worker and send request to it
    # Queue of available workers
    available_workers = 0
    workers_list = []
    all_workers = set()
    # init poller
    poller = Poller()
    # Always poll for worker activity on backend
    poller.register(backend, zmq.POLLIN)
    # Poll front-end only if we have available workers
    poller.register(frontend, zmq.POLLIN)
    while True:
        socks = yield poller.poll()
        socks = dict(socks)
        # Handle worker activity on backend
        if backend in socks and socks[backend] == zmq.POLLIN:
            # Queue worker address for LRU routing
            message = yield backend.recv_multipart()
            assert available_workers < NBR_WORKERS
            worker_addr = message[0]
            # add worker back to the list of workers
            available_workers += 1
            workers_list.append(worker_addr)
            all_workers.add(worker_addr)
            #   Second frame is empty
            empty = message[1]
            assert empty == b""
            # Third frame is READY or else a client reply address
            client_addr = message[2]
            # If client reply, send rest back to frontend
            if client_addr != b"READY":
                # Following frame is empty
                empty = message[3]
                assert empty == b""
                reply = message[4]
                yield frontend.send_multipart([client_addr, b"", reply])
                printdbg('(run_broker) to frontend -- reply: "{}"'.format(reply))
                client_nbr -= 1
                if client_nbr == 0:
                    printdbg("(run_broker) exiting")
                    break  # Exit after N messages
        # poll on frontend only if workers are available
        if available_workers > 0:
            if frontend in socks and socks[frontend] == zmq.POLLIN:
                # Now get next client request, route to LRU worker
                # Client request is [address][empty][request]
                response = yield frontend.recv_multipart()
                [client_addr, empty, request] = response
                assert empty == b""
                #  Dequeue and drop the next worker address
                available_workers -= 1
                worker_id = workers_list.pop()
                yield backend.send_multipart([worker_id, b"", client_addr, b"", request])
                printdbg('(run_broker) to backend -- request: "{}"'.format(request))
    # out of infinite loop: do some housekeeping
    printdbg("(run_broker) finishing")
    for worker_id in workers_list:
        yield backend.send_multipart([worker_id, b"", b"", b"", b"Stop"])
    printdbg("(run_broker) workers cancelled")
    yield gen.sleep(1)
    frontend.close()
    backend.close()
    # context.term()     # Caution: calling term() blocks.
    printdbg("(run_broker) returning")
    result = "finished ok"
    raise gen.Return(result)
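The broker schedules run_worker(url_worker, context, idx) and run_client(url_client, context, idx) callbacks, but those coroutines are not part of this snippet. A sketch of the shapes it expects, matching the frames the broker sends and reads above (the constants, the echo payload and the use of print are assumptions):

import zmq
from tornado import gen

NBR_CLIENTS = 10   # assumed; the real constants live in the original module
NBR_WORKERS = 3    # assumed

@gen.coroutine
def run_client(url_client, context, idx):
    # Sketch: each client sends three requests (client_nbr = NBR_CLIENTS * 3 above).
    client = context.socket(zmq.REQ)
    client.connect(url_client)
    for _ in range(3):
        yield client.send(b"HELLO")
        reply = yield client.recv()
        print("(run_client {}) got {}".format(idx, reply))
    client.close()

@gen.coroutine
def run_worker(url_worker, context, idx):
    # Sketch: a REQ worker announces READY, then echoes requests until told to stop.
    worker = context.socket(zmq.REQ)
    worker.connect(url_worker)
    yield worker.send(b"READY")
    while True:
        client_addr, empty, request = yield worker.recv_multipart()
        if request == b"Stop":
            break
        yield worker.send_multipart([client_addr, b"", request])
    worker.close()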
Example #11
def run_broker(loop):
    """ main broker method """
    url_worker = "inproc://workers"
    url_client = "inproc://clients"
    client_nbr = NBR_CLIENTS * 3
    # Prepare our context and sockets
    context = Context()
    frontend = context.socket(zmq.ROUTER)
    frontend.bind(url_client)
    backend = context.socket(zmq.ROUTER)
    backend.bind(url_worker)
    # create worker and client tasks
    #worker_tasks = []
    for idx in range(NBR_WORKERS):
        loop.add_callback(partial(run_worker, url_worker, context, idx))
        #worker_tasks.append(task)
    #client_tasks = []
    for idx in range(NBR_CLIENTS):
        loop.add_callback(partial(run_client, url_client, context, idx))
        #client_tasks.append(task)
    # Logic of LRU loop
    # - Poll backend always, frontend only if 1+ worker ready
    # - If worker replies, queue worker as ready and forward reply
    # to client if necessary
    # - If client requests, pop next worker and send request to it
    # Queue of available workers
    available_workers = 0
    workers_list = []
    all_workers = set()
    # init poller
    poller = Poller()
    # Always poll for worker activity on backend
    poller.register(backend, zmq.POLLIN)
    # Poll front-end only if we have available workers
    poller.register(frontend, zmq.POLLIN)
    while True:
        socks = yield poller.poll()
        socks = dict(socks)
        # Handle worker activity on backend
        if (backend in socks and socks[backend] == zmq.POLLIN):
            # Queue worker address for LRU routing
            message = yield backend.recv_multipart()
            assert available_workers < NBR_WORKERS
            worker_addr = message[0]
            # add worker back to the list of workers
            available_workers += 1
            workers_list.append(worker_addr)
            all_workers.add(worker_addr)
            #   Second frame is empty
            empty = message[1]
            assert empty == b""
            # Third frame is READY or else a client reply address
            client_addr = message[2]
            # If client reply, send rest back to frontend
            if client_addr != b'READY':
                # Following frame is empty
                empty = message[3]
                assert empty == b""
                reply = message[4]
                yield frontend.send_multipart([client_addr, b"", reply])
                printdbg('(run_broker) to frontend -- reply: "{}"'.format(
                    reply))
                client_nbr -= 1
                if client_nbr == 0:
                    printdbg('(run_broker) exiting')
                    break   # Exit after N messages
        # poll on frontend only if workers are available
        if available_workers > 0:
            if (frontend in socks and socks[frontend] == zmq.POLLIN):
                # Now get next client request, route to LRU worker
                # Client request is [address][empty][request]
                response = yield frontend.recv_multipart()
                [client_addr, empty, request] = response
                assert empty == b""
                #  Dequeue and drop the next worker address
                available_workers -= 1
                worker_id = workers_list.pop()
                yield backend.send_multipart(
                    [worker_id, b"", client_addr, b"", request])
                printdbg('(run_broker) to backend -- request: "{}"'.format(
                    request))
    #out of infinite loop: do some housekeeping
    printdbg('(run_broker) finishing')
    for worker_id in workers_list:
        yield backend.send_multipart([worker_id, b"", b"", b"", b"Stop"])
    printdbg('(run_broker) workers cancelled')
    yield gen.sleep(1)
    frontend.close()
    backend.close()
    #context.term()     # Caution: calling term() blocks.
    printdbg('(run_broker) returning')
    result = 'finished ok'
    raise gen.Return(result)