def main():
    """Parse CLI arguments, generate node keys, and run the network engine.

    Side effects: sets the module-level globals in_pipe, out_pipe,
    public_key and private_key, and exits the process when the engine stops.
    """
    global in_pipe, out_pipe, public_key, private_key

    try:
        docopt_config = "Usage: my_program.py [--port=PORT] [--connect=PORT]"
        arguments = docopt.docopt(docopt_config)
        port = arguments["--port"]
        if port is None:
            port = 5555
        connect_dest = arguments["--connect"]
    except docopt.DocoptExit as e:
        # BUGFIX: exceptions have no .message attribute on Python 3;
        # str(DocoptExit) is the usage text, so print the exception itself.
        print(e)
        return

    context = zmq.Context()
    in_pipe = zpipe(context)
    out_pipe = zpipe(context)
    loop = asyncio.get_event_loop()
    net_config = {"port": port}

    # Generate Node Keys & Id.
    private_key = enc.generate_RSA(4096)
    public_key = private_key.publickey()
    node_id = enc.generate_ID(public_key.exportKey("DER"))
    debug("node_id=[%s]." % node_id.hexdigest())

    # Start Net Engine in a worker thread via the default executor.
    zmq_future = loop.run_in_executor(
        None, engageNet, loop, context, out_pipe[0], in_pipe[1], net_config)

    # Connect for testing.
    if connect_dest is not None:
        out_pipe[1].send_multipart(
            [b"conn", "tcp://{}".format(connect_dest).encode()])

    try:
        loop.run_until_complete(zmq_future)
    except BaseException as e:
        handleException("loop.run_until_complete()", e)

    # Tell the engine to shut down, then tear down the loop and exit.
    out_pipe[1].send_multipart([b"shutdown"])
    loop.stop()
    loop.close()
    zmq_future.cancel()
    sys.exit(1)
def __init__(self):
    """Create the command/request pipes and launch the background agent."""
    ctx = zmq.Context()
    self.context = ctx
    # Each zpipe yields (local end, agent end) of an inproc socket pair.
    self.commands, cmd_backend = zpipe(ctx)
    self.requests, req_backend = zpipe(ctx)
    worker = threading.Thread(
        target=agent_task,
        args=(ctx, cmd_backend, req_backend),
    )
    worker.daemon = True  # don't block interpreter shutdown
    worker.start()
    self.agent = worker
def main():
    """Parse CLI arguments, generate node keys, and run the network engine.

    Side effects: sets the module-level globals in_pipe, out_pipe,
    public_key and private_key, and exits the process when the engine stops.
    """
    global in_pipe, out_pipe, public_key, private_key

    try:
        docopt_config = "Usage: my_program.py [--port=PORT] [--connect=PORT]"
        arguments = docopt.docopt(docopt_config)
        port = arguments["--port"]
        if port is None:
            port = 5555
        connect_dest = arguments["--connect"]
    except docopt.DocoptExit as e:
        # BUGFIX: exceptions have no .message attribute on Python 3;
        # str(DocoptExit) is the usage text, so print the exception itself.
        print(e)
        return

    context = zmq.Context()
    in_pipe = zpipe(context)
    out_pipe = zpipe(context)
    loop = asyncio.get_event_loop()
    net_config = {"port": port}

    # Generate Node Keys & Id.
    private_key = enc.generate_RSA(4096)
    public_key = private_key.publickey()
    node_id = enc.generate_ID(public_key.exportKey("DER"))
    debug("node_id=[%s]." % node_id.hexdigest())

    # Start Net Engine in a worker thread via the default executor.
    zmq_future = loop.run_in_executor(
        None, engageNet, loop, context, out_pipe[0], in_pipe[1], net_config)

    # Connect for testing.
    if connect_dest is not None:
        out_pipe[1].send_multipart(
            [b"conn", "tcp://{}".format(connect_dest).encode()])

    try:
        loop.run_until_complete(zmq_future)
    except BaseException as e:
        handleException("loop.run_until_complete()", e)

    # Tell the engine to shut down, then tear down the loop and exit.
    out_pipe[1].send_multipart([b"shutdown"])
    loop.stop()
    loop.close()
    zmq_future.cancel()
    sys.exit(1)
def main():
    """Bind a PUB socket, start the state manager, and flood key-value updates."""
    # Prepare our context and publisher socket
    ctx = zmq.Context()
    publisher = ctx.socket(zmq.PUB)
    publisher.bind("tcp://*:5557")

    updates, peer = zpipe(ctx)

    manager_thread = threading.Thread(target=state_manager, args=(ctx, peer))
    manager_thread.daemon = True
    manager_thread.start()

    sequence = 0
    random.seed(time.time())
    try:
        while True:
            # Distribute as key-value message
            sequence += 1
            kvmsg = KVMsg(sequence)
            kvmsg.key = "%d" % random.randint(1, 10000)
            kvmsg.body = "%d" % random.randint(1, 1000000)
            kvmsg.send(publisher)
            kvmsg.send(updates)
    except KeyboardInterrupt:
        # FIX: the Python-2-only print statement is parenthesized; the
        # single-argument form behaves identically on Python 2 and 3.
        print(" Interrupted\n%d messages out" % sequence)
def main():
    """Start pub/sub/listener threads and run a monitored XSUB/XPUB proxy."""
    ctx = zmq.Context.instance()

    # Kick off the publisher and subscriber child threads first.
    pub = Thread(target=publisher_thread)
    sub = Thread(target=subscriber_thread)
    pub.start()
    sub.start()

    pipe = zpipe(ctx)

    # Proxy endpoints: XSUB faces the publisher, XPUB faces subscribers.
    frontend = ctx.socket(zmq.XSUB)
    frontend.connect("tcp://localhost:6000")
    backend = ctx.socket(zmq.XPUB)
    backend.bind("tcp://*:6001")

    listener = Thread(target=listener_thread, args=(pipe[1],))
    listener.start()

    try:
        # Blocks, forwarding traffic and copying it onto the monitor pipe.
        monitored_queue(frontend, backend, pipe[0], 'pub', 'sub')
    except KeyboardInterrupt:
        print("Interrupted")

    del frontend, backend, pipe
    ctx.term()
def main():
    """Benchmark the file-transfer client/server pair and report throughput."""
    # writeBigFile()
    # Start child threads
    ctx = zmq.Context()
    a, b = zpipe(ctx)

    client = Thread(target=client_thread, args=(ctx, b))
    server = Thread(target=server_thread, args=(ctx,))
    client.start()
    start = time.time()
    # FIX: Python-2-only print statements parenthesized; the single-argument
    # form behaves identically on Python 2 and is valid on Python 3.
    print("start")
    server.start()

    # loop until client tells us it's done
    try:
        chunks = int(a.recv())
        stop = time.time()
        # `size` is presumably a module-level constant (total MB) -- TODO confirm.
        print("MB/sec:%s" % (float(size) / (stop - start)))
        print("OPS/sec:%s" % (float(chunks) / (stop - start)))
    except KeyboardInterrupt:
        pass

    del a, b
    ctx.term()
def main():
    """Titanic dispatcher: queue incoming request UUIDs on disk and replay
    them against the broker until each one succeeds."""
    verbose = '-v' in sys.argv
    ctx = zmq.Context()

    # Create MDP client session with short timeout
    client = MajorDomoClient("tcp://localhost:5555", verbose)
    client.timeout = 1000  # 1 sec
    client.retries = 1     # only 1 retry

    request_pipe, peer = zpipe(ctx)
    request_thread = threading.Thread(target=titanic_request, args=(peer,))
    request_thread.daemon = True
    request_thread.start()

    reply_thread = threading.Thread(target=titanic_reply)
    reply_thread.daemon = True
    reply_thread.start()

    close_thread = threading.Thread(target=titanic_close)
    close_thread.daemon = True
    close_thread.start()

    poller = zmq.Poller()
    poller.register(request_pipe, zmq.POLLIN)

    queue_filename = os.path.join(TITANIC_DIR, 'queue')

    # Main dispatcher loop
    while True:
        # Ensure message directory and queue file exist.
        # BUGFIX: the file must be created in append mode -- opening it
        # 'wb' here truncated the queue every second, dropping all
        # pending requests.
        if not os.path.exists(TITANIC_DIR):
            os.mkdir(TITANIC_DIR)
        with open(queue_filename, 'ab'):
            pass

        # We'll dispatch once per second, if there's no activity
        try:
            items = poller.poll(1000)
        except KeyboardInterrupt:
            break  # Interrupted

        if items:
            # Append UUID to queue, prefixed with '-' for pending
            suuid = request_pipe.recv().decode('utf-8')
            with open(queue_filename, 'ab') as f:
                line = "-%s\n" % suuid
                f.write(line.encode('utf-8'))

        # Brute-force dispatcher.  BUGFIX: we track each entry's absolute
        # offset ourselves -- after readlines() the file position is at
        # EOF, so a relative seek marked the wrong byte for every entry
        # except the last.
        with open(queue_filename, 'rb+') as f:
            offset = 0
            for raw in f.readlines():
                entry = raw.decode('utf-8')
                # UUID is prefixed with '-' if still waiting
                if entry[0] == '-':
                    suuid = entry[1:].rstrip()  # rstrip '\n' etc.
                    print("I: processing request %s" % suuid)
                    if service_success(client, suuid):
                        # mark queue entry as processed in place
                        f.seek(offset, os.SEEK_SET)
                        f.write(b'+')
                        f.seek(0, os.SEEK_END)
                offset += len(raw)
def main():
    """Run publisher and subscriber threads, then shut the publisher down."""
    ctx = zmq.Context.instance()

    pub_pipe, pub_peer = zpipe(ctx)
    sub_pipe, sub_peer = zpipe(ctx)

    # Launch both workers as daemons, publisher first.
    for target, peer in ((publisher, pub_peer), (subscriber, sub_peer)):
        worker = threading.Thread(target=target, args=(peer,))
        worker.daemon = True
        worker.start()

    # Wait for the subscriber to signal that it is finished ...
    sub_pipe.recv()
    # ... then tell the publisher to halt and give it a moment to exit.
    pub_pipe.send(b"break")
    time.sleep(0.1)
def main():
    """Run publisher and subscriber threads, then shut the publisher down."""
    ctx = zmq.Context.instance()

    pub_pipe, pub_peer = zpipe(ctx)
    sub_pipe, sub_peer = zpipe(ctx)

    pub_thread = threading.Thread(target=publisher, args=(pub_peer, ))
    pub_thread.daemon = True
    pub_thread.start()

    sub_thread = threading.Thread(target=subscriber, args=(sub_peer, ))
    sub_thread.daemon = True
    sub_thread.start()

    # wait for sub to finish
    sub_pipe.recv()
    # tell pub to halt.  BUGFIX: pyzmq sockets only accept bytes frames --
    # sending the str "break" raises TypeError, so the sentinel is b"break".
    pub_pipe.send(b"break")
    time.sleep(0.1)
def main():
    """Bind a PUB socket, start the state manager, and flood key-value updates."""
    # Prepare our context and publisher socket
    ctx = zmq.Context()
    publisher = ctx.socket(zmq.PUB)
    publisher.bind("tcp://*:5557")

    updates, peer = zpipe(ctx)

    manager_thread = threading.Thread(target=state_manager, args=(ctx, peer))
    manager_thread.daemon = True
    manager_thread.start()

    sequence = 0
    random.seed(time.time())
    try:
        while True:
            # Distribute as key-value message
            sequence += 1
            kvmsg = KVMsg(sequence)
            kvmsg.key = "%d" % random.randint(1, 10000)
            kvmsg.body = "%d" % random.randint(1, 1000000)
            kvmsg.send(publisher)
            kvmsg.send(updates)
    except KeyboardInterrupt:
        # FIX: the Python-2-only print statement is parenthesized; the
        # single-argument form behaves identically on Python 2 and 3.
        print(" Interrupted\n%d messages out" % sequence)
def _add_service(self, cls, *args, **kwargs):
    """Instantiate a service of type *cls* and register it by its control pipe."""
    # Control socket pair: we keep `pipe`, the new service gets `peer`.
    pipe, peer = zpipe(self.ctx)

    # Pass our local identity/config values along with the caller's args.
    svc = cls(
        peer,
        self.__endpoint,
        self.__uuid,
        self.__config_service,
        *args,
        **kwargs,
    )

    # Index running services by their pipe socket.
    self.__services[pipe] = svc

    # The Configuration service is special: remember it so later services
    # receive a reference to it.
    if Configuration == cls:
        self.__config_service = svc
def main():
    """Benchmark the file-transfer client/server pair and report throughput."""
    # writeBigFile()
    # Start child threads
    ctx = zmq.Context()
    a, b = zpipe(ctx)

    client = Thread(target=client_thread, args=(ctx, b))
    server = Thread(target=server_thread, args=(ctx,))
    client.start()
    start = time.time()
    # FIX: Python-2-only print statements parenthesized; the single-argument
    # form behaves identically on Python 2 and is valid on Python 3.
    print("start")
    server.start()

    # loop until client tells us it's done
    try:
        chunks = int(a.recv())
        stop = time.time()
        # `size` is presumably a module-level constant (total MB) -- TODO confirm.
        print("MB/sec:%s" % (float(size) / (stop - start)))
        print("OPS/sec:%s" % (float(chunks) / (stop - start)))
    except KeyboardInterrupt:
        pass

    del a, b
    ctx.term()
def main():
    """Publish random key-value updates until interrupted, then stop the manager."""
    # Prepare our context and publisher socket
    ctx = zmq.Context()
    publisher = ctx.socket(zmq.PUB)
    publisher.bind("tcp://*:5557")

    updates, peer = zpipe(ctx)

    # Non-daemon thread: we join it explicitly on shutdown.
    manager = threading.Thread(target=state_manager, args=(ctx, peer))
    manager.start()

    random.seed(time.time())
    sequence = 0
    try:
        while True:
            # Distribute as key-value message
            sequence += 1
            msg = KVMsg(sequence)
            msg.key = ("%6d" % random.randint(0, 9999)).encode()
            msg.body = ("%4d" % random.randint(0, 999999)).encode()
            msg.send(publisher)
            msg.send(updates)
            time.sleep(0.01)
    except KeyboardInterrupt:
        # Ask the state manager to shut down cleanly before reporting.
        updates.send_multipart([b'QUIT'])
        manager.join()
        print(" Interrupted\n%d messages out" % sequence)
def __init__(self):
    """Spin up the clone agent in a daemon thread connected via an inproc pipe."""
    ctx = zmq.Context()
    self.ctx = ctx
    # Keep one end of the pipe; the agent thread receives the other.
    self.pipe, backend = zpipe(ctx)
    worker = threading.Thread(target=clone_agent, args=(ctx, backend))
    worker.daemon = True
    worker.start()
    self.agent = worker
def __init__(self):
    """Create async client sockets paired with a blocking agent thread."""
    # The client side runs on asyncio; the agent uses a plain context.
    self.context = zmq.asyncio.Context()
    agent_ctx = zmq.Context()

    # PAIR pipe carrying control commands to the agent.
    self.commands, cmd_sock = zpipe(
        self.context, agent_ctx, zmq.PAIR, zmq.PAIR)

    # DEALER/ROUTER pipe for requests with a bounded queue, so that a
    # blocking SEND does not lose requests while the server is unavailable.
    self.requests, req_sock = zpipe(
        self.context, agent_ctx, zmq.DEALER, zmq.ROUTER,
        int(CLIENT_QUEUE_SIZE / 2))
    # ROUTER raises on unroutable peers instead of silently dropping.
    req_sock.router_mandatory = 1

    worker = threading.Thread(
        target=agent_task, args=(agent_ctx, cmd_sock, req_sock))
    worker.daemon = True
    worker.start()
    self.agent = worker
def __init__(self):
    """Start the agent thread, sharing one context and a PAIR command pipe."""
    ctx = zmq.Context()
    self.context = ctx
    # Both ends of the PAIR pipe live on the same context.
    self.commands, agent_end = zpipe(ctx, ctx, zmq.PAIR, zmq.PAIR)
    worker = threading.Thread(target=do_agent_task, args=(ctx, agent_end))
    # Non-daemon: interpreter shutdown waits for the agent to finish.
    worker.daemon = False
    worker.start()
    self.agent = worker
def main():
    """Titanic dispatcher: queue request UUIDs on disk and replay them
    against the broker until each one succeeds."""
    verbose = '-v' in sys.argv
    ctx = zmq.Context()

    # Create MDP client session with short timeout
    client = MajorDomoClient("tcp://localhost:5555", verbose)
    client.timeout = 1000  # 1 sec
    client.retries = 1     # only 1 retry

    request_pipe, peer = zpipe(ctx)
    request_thread = threading.Thread(target=titanic_request, args=(peer,))
    request_thread.daemon = True
    request_thread.start()

    reply_thread = threading.Thread(target=titanic_reply)
    reply_thread.daemon = True
    reply_thread.start()

    close_thread = threading.Thread(target=titanic_close)
    close_thread.daemon = True
    close_thread.start()

    poller = zmq.Poller()
    poller.register(request_pipe, zmq.POLLIN)

    # Main dispatcher loop
    while True:
        # Ensure message directory exists
        if not os.path.exists(TITANIC_DIR):
            os.mkdir(TITANIC_DIR)

        # We'll dispatch once per second, if there's no activity
        try:
            items = poller.poll(1000)
        except KeyboardInterrupt:
            break  # Interrupted

        if items:
            # Append UUID to queue, prefixed with '-' for pending
            uuid = request_pipe.recv()
            with open(os.path.join(TITANIC_DIR, 'queue'), 'a') as f:
                f.write("-%s\n" % uuid)

        # Brute-force dispatcher
        with open(os.path.join(TITANIC_DIR, 'queue'), 'r+b') as f:
            for entry in f.readlines():
                # UUID is prefixed with '-' if still waiting
                if entry[0] == '-':
                    uuid = entry[1:].rstrip()  # rstrip '\n' etc.
                    # FIX: Python-2-only print parenthesized; single-arg
                    # form behaves identically on py2 and is valid on py3.
                    print("I: processing request %s" % uuid)
                    if service_success(client, uuid):
                        # mark queue entry as processed
                        # NOTE(review): after readlines() the position is at
                        # EOF, so this relative seek only lands correctly on
                        # the final pending entry -- consider tracking each
                        # entry's absolute offset instead.
                        here = f.tell()
                        f.seek(-1 * len(entry), os.SEEK_CUR)
                        f.write('+')
                        f.seek(here, os.SEEK_SET)
def run(loop):
    """Run client, server and monitor concurrently and print their results.

    `loop` is accepted for interface compatibility; the coroutine scheduler
    drives the yielded task list itself.
    """
    ctx = Context()
    left, right = zpipe(ctx)

    # Yielding the list suspends this coroutine until all tasks finish.
    tasks = [
        client_task(ctx, right),
        server_task(ctx),
        monitor(left),
    ]
    responses = yield tasks
    print('responses: {}'.format(responses))

    # Drop the pipe references so the sockets can be reclaimed.
    del left, right
    print('(run) finished')
def run(loop):
    """Schedule client/server/monitor tasks, wait for all, and print results."""
    ctx = Context()
    left, right = zpipe(ctx)

    # Wrap each coroutine in a Future so we can collect results afterwards.
    futures = [
        asyncio.ensure_future(coro)
        for coro in (client_task(ctx, right), server_task(ctx), monitor(left))
    ]
    loop.run_until_complete(asyncio.wait(futures))

    print('results: {}'.format([fut.result() for fut in futures]))

    # Release the pipe sockets.
    del left, right
    print('(run) finished')
def __init__(self):
    """Wire up the command pipe, DEALER request socket, and agent thread."""
    ctx = zmq.Context()
    self.context = ctx

    # PAIR pipe for commands between the client thread and the agent.
    self.commands, agent_end = zpipe(ctx, ctx, zmq.PAIR, zmq.PAIR)

    # DEALER socket carrying requests to the agent over inproc.
    requests = ctx.socket(zmq.DEALER)
    requests.identity = IDENTITY_CLIENT
    requests.sndhwm = DEALER_QUEUE_SIZE
    requests.connect("inproc://toto")
    self.requests = requests

    worker = threading.Thread(target=agent_task, args=(ctx, agent_end))
    worker.daemon = True
    worker.start()
    self.agent = worker
def __init__(self, debug=False, discovery=True, connect_localhost=True):
    """Start the DKV agent thread and optionally connect to peers.

    debug             -- forwarded flag stored on the instance
    discovery         -- also connect via the discovery mechanism
    connect_localhost -- connect to a local server on self.port first
    """
    self.debug = debug
    ctx = zmq.Context()
    self.ctx = ctx
    self.pipe, backend = zpipe(ctx)
    self.connected_event = threading.Event()

    worker = threading.Thread(
        target=dkv_agent, args=(ctx, backend, self.connected_event))
    worker.daemon = True
    worker.start()
    self.agent = worker

    if connect_localhost:
        # NOTE: relies on self.port being defined elsewhere on the class.
        self.connect(address='tcp://localhost:%d' % self.port)
    if discovery:
        self.connect_via_discovery()
def main():
    """Launch worker, broker and client threads, then wait for the client."""
    # Create threads
    ctx = zmq.Context()
    client, pipe = zpipe(ctx)

    # Worker and broker run as daemons; the client signals us when done.
    threads = {
        'client': threading.Thread(target=client_task, args=(ctx, pipe)),
        'worker': threading.Thread(target=worker_task),
        'broker': threading.Thread(target=broker_task),
    }
    threads['worker'].daemon = True
    threads['broker'].daemon = True

    threads['worker'].start()
    threads['broker'].start()
    threads['client'].start()

    # Wait for signal on client pipe
    client.recv()
def main():
    """Run the broker demo: worker + broker daemons plus one client thread."""
    ctx = zmq.Context()
    client, pipe = zpipe(ctx)

    # The client gets our end of the pipe to report completion.
    client_thread = threading.Thread(target=client_task, args=(ctx, pipe))

    worker_thread = threading.Thread(target=worker_task)
    worker_thread.daemon = True
    worker_thread.start()

    broker_thread = threading.Thread(target=broker_task)
    broker_thread.daemon = True
    broker_thread.start()

    client_thread.start()

    # Block until the client signals completion over the pipe.
    client.recv()
def main():
    """Start client/server threads and print the client's completion signal."""
    # Start child threads
    ctx = zmq.Context()
    local, remote = zpipe(ctx)

    client = Thread(target=client_thread, args=(ctx, remote))
    server = Thread(target=server_thread, args=(ctx,))
    client.start()
    server.start()

    # Block until the client reports back over the pipe.
    try:
        print(local.recv())
    except KeyboardInterrupt:
        pass

    del local, remote
    ctx.term()
def _exec_base(self):
    """Create and play the Base role; returns -1 if startup fails.

    The pair pipe is the Role's control socket; it is unused here beyond
    lifecycle management.
    """
    # pair socket for controlling Role; not used here
    pipe, peer = zpipe(self.ctx)

    try:
        role = Base(peer, self.args.address, gen_uuid())
        peer = None  # closed by peer
    except ZMQError as e:
        self.logger.debug('exception while starting base role:', exc_info=True)
        self.logger.error('Unable to start base node: %s' % e)
        self.logger.error('Is one already running on the given address?')
        # BUGFIX: don't leak the control sockets when startup fails --
        # the role never took ownership of peer on this path.
        pipe.close()
        peer.close()
        return -1

    # start playing role
    # NOTE: this should only return when exiting
    role.play()

    # cleanup
    pipe.close()
    del pipe, peer
def main():
    """Titanic dispatcher: persist request UUIDs and replay them until served."""
    verbose = '-v' in sys.argv
    ctx = zmq.Context()

    # Create MDP client session with short timeout;
    # this client is used by the service_success method.
    client = MajorDomoClient("tcp://localhost:5555", verbose)
    client.timeout = 1000  # 1 sec
    client.retries = 1     # only 1 retry

    request_pipe, peer = zpipe(ctx)
    request_thread = threading.Thread(target=titanic_request,
                                      args=(peer, verbose, ))
    request_thread.daemon = True
    request_thread.start()

    reply_thread = threading.Thread(target=titanic_reply, args=(verbose, ))
    reply_thread.daemon = True
    reply_thread.start()

    close_thread = threading.Thread(target=titanic_close, args=(verbose, ))
    close_thread.daemon = True
    close_thread.start()

    poller = zmq.Poller()
    poller.register(request_pipe, zmq.POLLIN)

    # Ensure message directory exists
    TITANIC_DIR.mkdir(parents=True, exist_ok=True)
    # create the dispatcher queue file, if not present
    queue = TITANIC_DIR.joinpath('queue')
    queue.touch()

    # Main dispatcher loop
    while True:
        # We'll dispatch once per second, if there's no activity
        try:
            items = poller.poll(1000)
        except KeyboardInterrupt:
            break  # Interrupted

        if items:
            # Append UUID to queue, prefixed with '-' for pending
            uuid = request_pipe.recv()
            with open(queue, 'a') as f:
                f.write(f"-{uuid.decode()}\n")

        # Brute-force dispatcher.  BUGFIX: track each entry's absolute
        # offset -- after readlines() the file position is at EOF, so a
        # relative seek marked the wrong byte for every entry except the
        # last one.
        with open(queue, 'r+b') as f:
            offset = 0
            for raw in f.readlines():
                entry = raw.decode()
                # UUID is prefixed with '-' if still waiting
                if entry[0] == '-':
                    uuid = entry[1:].rstrip()  # rstrip '\n' etc.
                    print(f"I: processing request {uuid}")
                    if service_success(client, uuid):
                        # mark queue entry as processed in place
                        f.seek(offset, os.SEEK_SET)
                        f.write(b'+')
                        f.seek(0, os.SEEK_END)
                        print(f"completed {uuid}")
                offset += len(raw)
def __init__(self):
    """Create the PAIR command pipe shared between client and agent threads."""
    ctx = zmq.Context()
    self.context = ctx
    # The command socket lives in the client thread; its peer is handed
    # to the agent later.
    (self.command_socket,
     self.agent_command_socket) = zpipe(ctx, ctx, zmq.PAIR, zmq.PAIR)
def __init__(self):
    """Launch the agent in a daemon thread wired up through an inproc pipe."""
    ctx = zmq.Context()
    self.ctx = ctx
    # We keep one pipe end; the agent thread receives the other.
    self.pipe, agent_end = zpipe(ctx)
    worker = threading.Thread(target=agent_task, args=(ctx, agent_end))
    worker.daemon = True
    worker.start()
    self.agent = worker
def __handle_assignment(self, response):
    """React to an assignment message: create the Role matching the level.

    Validates the response, subscribes to topology keys for non-root
    levels, constructs the appropriate Role (Root / Collector / Metric),
    and starts playing it.
    """
    if self.in_play_state:
        # @todo need to handle re-assignment
        self.logger.warning('received re-assignment; ignoring')
        return

    # Reject assignments without a level or with an unknown level.
    if 'level' not in response.properties:
        self.logger.error('property missing: level')
        return
    level = response['level']
    if level not in ['root', 'branch', 'leaf']:
        self.logger.error('unknown assignment level: %s' % level)
        return

    # Control pipe between us and the role; the role takes ownership of peer.
    self.role_pipe, peer = zpipe(self.ctx)

    if 'root' == level:
        # Root bootstraps from its configuration file.
        assert 'config-file' in response.properties
        self.role = Root(
            peer,
            self.endpoint,
            self.uuid,
            response['config-file'],
        )
    else:
        # Non-root roles must be told their parent and group.
        assert 'parent' in response.properties
        assert 'group' in response.properties
        self.group = response['group']

        # Subscribe to topology messages addressed to our group.
        self.logger.debug('adding filter: "{}"'.format(TOPO.group_key(response['group'])))
        self.topo_socket.setsockopt_string(SUBSCRIBE, TOPO.group_key(response['group']))

        if 'branch' == level:
            # Branch collectors additionally listen for recovery messages.
            self.logger.debug('adding filter: "{}"'.format(TOPO.recovery_key()))
            self.topo_socket.setsockopt_string(SUBSCRIBE, TOPO.recovery_key())
            self.role = Collector(
                peer,
                self.endpoint,
                self.uuid,
                response['parent'],
                response['group'],
            )
        else:
            self.role = Metric(
                peer,
                self.endpoint,
                self.uuid,
                response['parent'],
                response['group'],
            )

    peer = None  # closed by peer/role

    self.level = level
    self.__play_role()