def new_put_data(self, current_file_url, file_url, is_byte):
    """Upload a local file's contents to the distributed filesystem.

    Sends a PUTDATA request to the filesystem node, which answers with a
    data endpoint; the file is then streamed to that endpoint chunk by
    chunk via ``self._read_some_lines``.

    :param current_file_url: local path of the file to upload
    :param file_url: destination url/name inside the filesystem
    :param is_byte: whether the file holds byte data
    :return: None on success, 1 on protocol/file error, -1 on timeout
    """
    context = zmqa.Context()
    filesystem_socket = context.socket(zmq.REQ)
    filesystem_socket.connect("tcp://" + self.filesystem_addr[0] + ":" +
                              self.filesystem_addr[1])
    message = Message().set_message("PUTDATA", {
        'file_url': file_url,
        'byte': is_byte
    })
    filesystem_socket.send_string(str(message))
    answer = mt.loop_tool(mt.try_to_recv, filesystem_socket.recv_string)
    if answer == -1:
        print("Client: Putdata answer timed out!")
        filesystem_socket.close()
        return -1
    if answer.message_name != 'OK':
        print("Client: No me devolvieron OK en el put data")
        filesystem_socket.close()
        return 1
    filesystem_socket.close()
    # The payload names the endpoint that will actually receive the data.
    data_addr = answer.payload["ip"] + ":" + answer.payload["port"]
    putdata_context = zmqa.Context()
    putdata_socket = putdata_context.socket(zmq.REQ)
    putdata_socket.connect("tcp://" + data_addr)
    try:
        with open(current_file_url, 'r') as file:
            lines, eof = self._read_some_lines(file, is_byte)
            while True:
                putdata_socket.send_string(lines)
                if eof:
                    break
                answer = mt.loop_tool(mt.try_to_recv,
                                      putdata_socket.recv_string)
                # BUG FIX: the timeout sentinel (-1, an int) must be checked
                # BEFORE touching .message_name, otherwise a timeout crashed
                # with AttributeError instead of returning -1.
                if answer == -1:
                    print("Client: Data Message timed out")
                    putdata_socket.close()
                    return -1
                if answer.message_name != "OK":
                    print(
                        "Client: Data socket answer timed out. Something wrong sending the functions file"
                    )
                    putdata_socket.close()  # don't leak the socket on error
                    return 1
                lines, eof = self._read_some_lines(file, is_byte)
        putdata_socket.close()
        print("Data sendend: ", current_file_url)
    except FileNotFoundError:
        print('No existe el file: ', current_file_url)
        return 1
def __init__(self, address, *, prefix=None, loop=None, zmq=None,
             zmq_asyncio=None, deserializer=pickle.loads):
    """Connect a ZMQ SUB socket to *address* and subscribe to all topics.

    :param address: "host:port" string or (host, port) pair
    :param prefix: optional topic prefix filter; a str is wrapped in a list
    :param loop: event loop; a new ZMQEventLoop is created when omitted
    :param zmq: injectable zmq module (imported lazily when None)
    :param zmq_asyncio: injectable zmq.asyncio module (lazy when None)
    :param deserializer: callable applied to received payloads
    """
    # NOTE(review): `b' ' in prefix` raises TypeError when prefix is a str
    # (bytes-in-str membership test) -- presumably prefix is expected to be
    # bytes or a list here; confirm against callers.
    if prefix and b' ' in prefix:
        raise ValueError("prefix {!r} may not contain b' '".format(prefix))
    if isinstance(prefix, str):
        prefix = [prefix]
    self._prefix = prefix
    # Lazy imports keep zmq optional and let tests inject stub modules.
    if zmq is None:
        import zmq
    if zmq_asyncio is None:
        import zmq.asyncio as zmq_asyncio
    if isinstance(address, str):
        address = address.split(':', maxsplit=1)
    self._deserializer = deserializer
    self.address = (address[0], int(address[1]))
    if loop is None:
        loop = zmq_asyncio.ZMQEventLoop()
    self.loop = loop
    asyncio.set_event_loop(self.loop)
    self._context = zmq_asyncio.Context()
    self._socket = self._context.socket(zmq.SUB)
    url = "tcp://%s:%d" % self.address
    self._socket.connect(url)
    # Empty subscription string -> receive every message on the socket.
    self._socket.setsockopt_string(zmq.SUBSCRIBE, "")
    self._task = None
    super().__init__()
def try_to_connect_master(self):
    """Announce the job (JOB message) to masters until one answers OK.

    Rotates through known masters via ``self._get_new_master``; on success
    stores the accepted master's ping address in ``self.master_ping_addr``.

    :return: 0 once a master accepted the job
    """
    print("Client: ", 'Esperando respuesta por: ', self.listener_addr)
    while True:
        print("Client: Into the while trying to get some master")
        sock = zmqa.Context().socket(zmq.REQ)
        self._get_new_master()
        print("Client: ", "Trying to cannect to: ", self.current_master)
        sock.connect("tcp://{}:{}".format(*self.current_master))
        sock.send_string(
            str(Message().set_message(
                "JOB", {
                    "job_url": self.job_url,
                    "job_id": self.jhash,
                    "data_type": self.data_type,
                    "client_addr": self.listener_addr,
                    "functions_url": self.files_urls["functions_url"],
                    "map_data_url": self.files_urls["data_url"],
                    "status_db_url": self.files_urls["db_url"]
                })))
        print("Enviado el mensaje de job")
        answer = mt.loop_tool(mt.try_to_recv, sock.recv_string)
        if answer == -1:
            print("Client: ", self.current_master, " did not responded")
            # BUG FIX: _get_new_master() was also called here, advancing the
            # rotation twice per timeout and skipping every other master --
            # the loop header already advances on the next iteration.
            sock.close()  # BUG FIX: one REQ socket leaked per retry
            continue
        print("Client:JOB answer:", answer)
        if answer.message_name == "OK":
            self.master_ping_addr = (answer.payload["ping_tracker_ip"],
                                     answer.payload["ping_tracker_port"])
            print("Client: ", "Sended job")
            sock.close()
            return 0
        sock.close()
def start_server(self):
    """Run the filesystem node's request loop.

    Binds a REP socket on ``self.addr``, starts the background
    ping-attending thread, then dispatches each incoming message to the
    handler registered under its name in ``self.names``.

    :return: Nothing (loops forever)
    """
    context = zmqa.Context()
    socket = context.socket(zmq.REP)
    socket.bind("tcp://{}:{}".format(self.addr[0], self.addr[1]))
    print("FS:Running in {}".format(self.addr))
    # Ping responder runs in the background; daemon so it dies with us.
    attend_ping = Thread(target=self._attend_ping)
    attend_ping.daemon = True
    attend_ping.start()
    while True:
        print("FS: Into the WHILE")
        # NOTE(review): `loop` is a module/global event loop and the socket
        # is presumably a zmq.asyncio one (recv_string returns a coroutine);
        # confirm `loop` is defined at module scope.
        text = loop.run_until_complete(socket.recv_string())
        print("FS: Server received:", text)
        message = Message().get_message(text)
        # Dispatch table: message name -> handler(socket, message).
        function_to_execute = self.names[message.message_name]
        function_to_execute(socket, message)
def send_job(self, data_file, function_file):
    """
    A function to put a job's data on the service's filesystem
    :param data_file: The url of the data file to send
    :param function_file: The url of the function file to send
    :return: 0 if worked OK, -1 on registration failure
    """
    print("Client: ", 'Tratamos de hacer jobreg a: ', self.current_master)
    client_context = zmqa.Context()
    register_socket = client_context.socket(zmq.REQ)
    register_socket.connect("tcp://" + self.current_master[0] + ":" +
                            self.current_master[1])
    register_message = Message().set_message(
        "JOBREG", {
            "files": [function_file, data_file],
            "extra_info": self.job_info_for_register
        })
    register_socket.send_string(str(register_message))
    answer = mt.loop_tool(mt.try_to_recv, register_socket.recv_string)
    if answer == -1:
        print("Client: ", 'No me respondio: ', self.current_master)
        register_socket.close()
        return -1
    print("Client: ", "Recibi del jobreg: ", answer)
    if answer.message_name == "OK":
        # Remember everything the master told us about the new job.
        self.jhash = answer.payload["job_id"]
        self.masters_list = answer.payload["trackers_addr"]
        self.filesystem_addrs = answer.payload["filesystem"]
        self.filesystem_addr = self._get_new_filesystem_node()
        if "info" in answer.payload.keys():
            print("Info:", answer.payload["info"])
    else:
        print("Client:Job not correctly sent")
        register_socket.close()  # BUG FIX: socket leaked on this error path
        return -1
    # First sends the files to the filesystem, retrying other FS nodes on failure
    while True:
        r = self.send_job_data_to_fs(function_file, data_file)
        if r == -1:
            print("Client: Problem with the FS")
            self.filesystem_addr = self._get_new_filesystem_node()
        else:
            break
    register_socket.close()
    # Explicit check instead of `assert`: asserts vanish under `python -O`.
    if self.try_to_connect_master() == -1:
        raise AssertionError("No hay workers disponibles")
    return 0
async def receiver2(id):
    """Pull messages forever from the local PUSH endpoint and print them."""
    ctx = azmq.Context()
    pull_sock = ctx.socket(zmq.PULL)
    pull_sock.connect('tcp://localhost:32769')
    while True:
        received = await pull_sock.recv_string()
        print(f'client{id}: {received}')
async def send_byte_data(data, addr):
    """Send raw bytes to *addr* over a REQ socket and return the parsed reply."""
    ctx = zmqa.Context()
    sender = ctx.socket(zmq.REQ)
    sender.connect('tcp://{}:{}'.format(*addr))
    sender.send(data)
    reply = await sender.recv_string()
    sender.close()
    return Message().get_message(reply)
async def send_message_recv_byte(message, addr):
    """Send *message* (stringified) to *addr* and return the raw byte reply."""
    ctx = zmqa.Context()
    req_sock = ctx.socket(zmq.REQ)
    req_sock.connect("tcp://{}:{}".format(addr[0], addr[1]))
    req_sock.send_string(str(message))
    reply = await req_sock.recv()
    req_sock.close()
    return reply
def new_get_data(self, file_url, is_byte_data=True):
    """Fetch *file_url* from the filesystem and unpickle its contents.

    Protocol: ask the filesystem node (GETDATA) for a data endpoint, then
    pull the file from that endpoint piece by piece, ACKing each piece
    with an OK message, until the end-of-data marker arrives.

    :param file_url: url of the file inside the filesystem
    :param is_byte_data: whether the stored data is bytes
    :return: the unpickled object, -1 on timeout, 1 on protocol error
    """
    context = zmqa.Context()
    filesystem_socket = context.socket(zmq.REQ)
    print(
        "Client:filesystem addr:",
        "tcp://" + self.filesystem_addr[0] + ":" + self.filesystem_addr[1])
    filesystem_socket.connect("tcp://{}:{}".format(*self.filesystem_addr))
    filesystem_socket.send_string(
        str(Message().set_message("GETDATA", {
            "byte": is_byte_data,
            "file_url": file_url
        })))
    data_sock_message = mt.loop_tool(mt.try_to_recv,
                                     filesystem_socket.recv_string)
    if data_sock_message == -1:
        print("No me respondio el filesystem")
        return -1
    if data_sock_message.message_name != "OK":
        print(
            "Client:Some error happened getting the message with the ip and port to get the result of "
            "the operation")
        print("instead, data sock message was:", str(data_sock_message))
        return 1
    # Second connection: the actual data endpoint announced in the payload.
    data_socket = context.socket(zmq.REQ)
    data_socket.connect("tcp://{}:{}".format(
        data_sock_message.payload["ip"], data_sock_message.payload["port"]))
    data_socket.send_string(str(Message().set_message("OK")))
    # End-of-stream sentinel; presumably ''/b'' depending on is_byte_data
    # -- confirm in Client._get_end_character.
    eof_cha = Client._get_end_character(is_byte_data)
    result_byte = eof_cha
    temp_line = mt.loop_tool(mt.raw_try_to_recv, data_socket.recv)
    if temp_line == -1:
        print("No me respondio el filesystem")
        return -1
    while temp_line != eof_cha:
        # ACK each chunk so the REQ/REP pair can exchange the next one.
        data_socket.send_string(str(Message().set_message("OK")))
        result_byte += temp_line
        temp_line = mt.loop_tool(mt.raw_try_to_recv, data_socket.recv)
        if temp_line == -1:
            print("No me respondio el filesystem")
            return -1
    import pickle
    result_data = pickle.loads(result_byte)
    return result_data
async def receiver():
    """Subscribe to EURUSD ticks and print each one, sleeping in between."""
    ctx = azmq.Context()
    sub_sock = ctx.socket(zmq.SUB)
    sub_sock.connect('tcp://localhost:32770')
    sub_sock.setsockopt_string(zmq.SUBSCRIBE, 'EURUSD')
    while True:
        tick = await sub_sock.recv_string()
        print(tick)
        print('sleep')
        await asyncio.sleep(5)
        print('wake up')
async def sender():
    """Push a single OPEN order message to the local PULL endpoint."""
    ctx = azmq.Context()
    push_sock = ctx.socket(zmq.PUSH)
    push_sock.connect('tcp://localhost:32768')
    await push_sock.send_string(format_order(**{'action': 'OPEN'}))
    print('im here')
def new_put_job(self):
    """Ask the filesystem to create this job's workspace.

    Sends PUTJOB with the job id, then confirms on the data endpoint the
    filesystem returned and collects the created job/database urls.

    :return: (job_url, database_url) on success, -1 otherwise
    """
    ctx = zmqa.Context()
    fs_sock = ctx.socket(zmq.REQ)
    print("Client: ", 'Tratemos de hacer putjob a: ', self.filesystem_addr)
    fs_sock.connect("tcp://{}:{}".format(*self.filesystem_addr))
    fs_sock.send_string(
        str(Message().set_message("PUTJOB", {"job_id": self.jhash})))
    reply = mt.loop_tool(mt.try_to_recv, fs_sock.recv_string)
    if reply == -1:
        print("PUTJOB answer timed out!")
        fs_sock.close()
        return -1
    if reply.message_name != 'OK':
        print("No me devolvieron OK")
        fs_sock.close()
        return -1
    fs_sock.close()
    # Second hop: the dedicated endpoint that will build the workspace.
    endpoint = reply.payload["ip"] + ":" + reply.payload["port"]
    job_ctx = zmqa.Context()
    job_sock = job_ctx.socket(zmq.REQ)
    job_sock.connect("tcp://" + endpoint)
    job_sock.send_string(str(Message().set_message("OK")))
    reply = mt.loop_tool(mt.try_to_recv, job_sock.recv_string)
    if reply == -1:
        print("PUTJOB answer timed out!")
        job_sock.close()
        return -1
    if reply.message_name != "OK":
        print("Error")
        job_sock.close()
        return -1
    if 'job_url' in reply.payload and 'database_url' in reply.payload:
        urls = (reply.payload['job_url'], reply.payload['database_url'])
        job_sock.close()
        return urls
    job_sock.close()
    return -1
def _remove_job(self):
    """Ask the filesystem node to delete this job's directory."""
    remove_sock = zmqa.Context().socket(zmq.REQ)
    remove_sock.connect("tcp://{}:{}".format(*self.filesystem_addr))
    remove_message = Message().set_message("REMOVEJOB",
                                           {'job_url': self.job_url})
    remove_sock.send_string(str(remove_message))
    reply = mt.loop_tool(mt.try_to_recv, remove_sock.recv_string)
    if reply == -1:
        print("Client: No me respondio el FileSystem")
        return -1
    if reply.message_name == "OK":
        print("Client: Job removed")
    else:
        print("Client: error al remover el job:", reply.payload['info'])
    return 0
def __init__(self, config_file=None):
    """Wire up the data server: ROUTER for requests, PUB for broadcasts.

    Ports come from the config file; two background coroutines are
    started, one answering data requests and one broadcasting data.
    """
    super().__init__()
    self.logger = Logger('DataServer', False)
    config = utils.load_config(config_file)
    self.request_port = config['data_server_request_port']
    self.pub_port = config['data_server_broadcast_port']
    # NOTE(review): hard-coded sqlite path under the current working dir.
    self.db_dir = f'{os.getcwd()}/data/test.db'
    self.socket = zmqa.Context().socket(zmq.ROUTER)
    self.socket.bind(f'tcp://127.0.0.1:{self.request_port}')
    self.broadcast_socket = zmqa.Context().socket(zmq.PUB)
    self.broadcast_socket.bind(f'tcp://127.0.0.1:{self.pub_port}')
    self.executor = futures.ThreadPoolExecutor(max_workers=10)
    self.tickers = set()  # tickers currently being served
    self.counter = 0
    self.run_coroutine(f'Listening on port {self.request_port}',
                       self.handle_data_request)
    self.run_coroutine(f'Broadcasting started on port {self.pub_port}',
                       self.broadcast_data)
def __init__(self, config_file=None):
    """Wire up the portfolio manager.

    Opens a ROUTER socket for client requests plus a REQ socket towards
    the data server, then starts the request/event/order coroutines.
    """
    super().__init__()
    self.logger = Logger('PortfolioManager', False)
    config = utils.load_config(config_file)
    self.port = config['manager_request_port']
    self.data_port = config['data_server_request_port']
    self.accounts = {}  # type: Dict[str, Account]
    self.account_queues = {}  # type: Dict[str, asyncio.Queue]
    self.feedbacks = {}  # type: Dict[str, asyncio.Queue]
    self.event_queue = asyncio.Queue()
    # One order queue per broker; only the simulated broker exists so far.
    self.brokers = {const.Broker.SIMULATED: asyncio.Queue()}
    self.socket = zmqa.Context().socket(zmq.ROUTER)
    self.socket.bind(f'tcp://127.0.0.1:{self.port}')
    self.data_socket = zmqa.Context().socket(zmq.REQ)
    self.data_socket.connect(f'tcp://127.0.0.1:{self.data_port}')
    self.run_coroutine(f'Starting listening on port {self.port}',
                       self.handle_request)
    self.run_coroutine('', self.handle_events)
    self.run_coroutine('', self.execute_simulated_order)
def __init__(self, tickers: list, port: int):
    """Subscribe to MT4 tick data for *tickers* on localhost:*port*."""
    super(MT4DataObject, self).__init__(tickers)
    self.port = port
    self.bar = None  # to be initialized in set_params
    # connect to server
    self.context = azmq.Context()
    # noinspection PyUnresolvedReferences
    self.in_socket = self.context.socket(zmq.SUB)
    self.in_socket.connect(f'tcp://127.0.0.1:{port}')
    # subscribe to ticket data -- one SUBSCRIBE filter per ticker symbol
    for ticker in tickers:
        # noinspection PyUnresolvedReferences
        self.in_socket.setsockopt_string(zmq.SUBSCRIBE, ticker)
async def __aenter__(self):
    """Open the agent's ZMQ channels and shared-memory manager.

    A SUB socket filtered on this agent's id receives orchestrator
    traffic; a PUSH socket sends back.  ``tx_url``/``rx_url``/
    ``mc_url_base`` are module-level endpoint constants.
    """
    self.zmq_ctx = azmq.Context()
    self.rx = self.zmq_ctx.socket(zmq.SUB)
    self.rx.connect(tx_url)
    # Only messages whose topic is this agent's id are delivered.
    self.rx.setsockopt_string(zmq.SUBSCRIBE, str(self.id))
    self.tx = self.zmq_ctx.socket(zmq.PUSH)
    self.tx.connect(rx_url)
    # NOTE(review): sync context-manager protocol invoked by hand; the
    # matching __exit__ is presumably called in __aexit__ -- confirm.
    self.smm = AFSharedMemoryManager().__enter__()
    self.ready = asyncio.Event()
    # Resources handed to the agent's behaviours by key.
    self.resource_map = {
        Resource.zmq_ctx: self.zmq_ctx,
        Resource.mc_url: mc_url_base,
        Resource.orch_tx_url: tx_url,
        Resource.orch_rx_url: rx_url,
        Resource.smm: self.smm,
    }
    return self
def init(self):
    """Validate the plugin config and create the ZeroMQ context.

    Raises RuntimeError when pyzmq is missing and ValueError when the
    bind/connect options are inconsistent.
    """
    self.log.debug("init zeromq ..")
    if not zmq:
        self.log.critical(
            "missing zeromq, please install pyzmq to use this plugin")
        raise RuntimeError("zeromq python module not found")
    self.ctx = zmq.Context()
    # sanity check config: exactly one of bind/connect must be present
    bind_cfg = self.config.get("bind")
    connect_cfg = self.config.get("connect")
    if bind_cfg and connect_cfg:
        msg = "bind and connect are mutually exclusive"
        self.log.critical(msg)
        raise ValueError(msg)
    if not bind_cfg and not connect_cfg:
        msg = "missing bind or connect"
        self.log.critical(msg)
        raise ValueError(msg)
async def send_message_recv_str(message, addr):
    """Send *message* (stringified) to *addr* over REQ and wait for the
    string reply.

    :param message: message object; sent as ``str(message)``
    :param addr: (host, port) pair of the peer
    :return: the reply string
    """
    ctx = zmqa.Context()
    req_sock = ctx.socket(zmq.REQ)
    req_sock.connect("tcp://{}:{}".format(*addr))
    req_sock.send_string(str(message))
    reply = await req_sock.recv_string()
    req_sock.close()
    return reply
def __init__(self, address, *, hostname=None, pid=None, run_engine_id=None,
             loop=None, zmq=None, zmq_asyncio=None):
    """Subscribe to a 0MQ proxy and filter messages by their origin.

    :param address: "host:port" string or (host, port) pair of the proxy
    :param hostname: accept only messages from this host (None = any)
    :param pid: accept only messages from this pid (None = any)
    :param run_engine_id: accept only this RunEngine's messages (None = any)
    :param loop: event loop; a new ZMQEventLoop is created when omitted
    :param zmq: injectable zmq module (imported lazily when None)
    :param zmq_asyncio: injectable zmq.asyncio module (lazy when None)
    """
    if zmq is None:
        import zmq
    if zmq_asyncio is None:
        import zmq.asyncio as zmq_asyncio
    if isinstance(address, str):
        address = address.split(':', maxsplit=1)
    self.address = (address[0], int(address[1]))
    self.hostname = hostname
    self.pid = pid
    self.run_engine_id = run_engine_id
    if loop is None:
        loop = zmq_asyncio.ZMQEventLoop()
    self.loop = loop
    asyncio.set_event_loop(self.loop)
    self._context = zmq_asyncio.Context()
    self._socket = self._context.socket(zmq.SUB)
    url = "tcp://%s:%d" % self.address
    self._socket.connect(url)
    self._socket.setsockopt_string(zmq.SUBSCRIBE, "")
    self._task = None

    def is_our_message(_hostname, _pid, _RE_id):
        # Close over filters and decide if this message applies to this
        # RemoteDispatcher.
        # BUG FIX: the last clause compared run_engine_id to itself
        # (`run_engine_id == run_engine_id`), which is always True, so the
        # run-engine-id filter never rejected anything.
        return ((hostname is None or hostname == _hostname)
                and (pid is None or pid == _pid)
                and (run_engine_id is None or run_engine_id == _RE_id))

    self._is_our_message = is_our_message
    super().__init__()
async def server():
    """Publish a fixed sequence of quote messages on tcp://127.0.0.1:12345."""
    ctx = azmq.Context()
    # noinspection PyUnresolvedReferences
    pub_sock = ctx.socket(zmq.PUB)
    pub_sock.bind('tcp://127.0.0.1:12345')
    quotes = [
        'usd 1;1.1',
        'eur 2;2.05',
        'usd 1.1;1.15',
        'usd 1.15;1.2',
        'eur 2.05;2.1',
        'eur 2.1;2.1',
    ]
    await asyncio.sleep(1)  # let client to warm up
    print('server starts')
    for quote in quotes:
        await pub_sock.send_string(quote)
        await asyncio.sleep(0.1)
    print('server finishes')
async def server():
    """Publish quote messages, logging a UTC timestamp after each send."""
    ctx = azmq.Context()
    pub_sock = ctx.socket(zmq.PUB)
    pub_sock.bind('tcp://127.0.0.1:12345')
    quotes = [
        'usd 1;1.1',
        'eur 2;2.05',
        'usd 1.1;1.15',
        'usd 1.15;1.2',
        'eur 2.05;2.1',
        'eur 2.05;2.1',
        'usd 1.05;1.1',
    ]
    await asyncio.sleep(1)  # give the subscriber time to connect
    print('server starts')
    for quote in quotes:
        await pub_sock.send_string(quote)
        print(f'sent {pd.Timestamp.now("UTC")}')
        await asyncio.sleep(0.1)
    print('server finishes')
def __init__(self, tickers, data=None):
    """Subscribe to the data server's broadcast and prepare OHLC buffers.

    :param tickers: symbols to subscribe to
    :param data: unused here -- presumably consumed by a subclass or the
        base class; confirm against the base __init__.
    """
    super(RealTimeDataObject, self).__init__(tickers)
    self.bars = {ticker: Bar() for ticker in tickers}
    self.data_ready = threading.Event()
    self.agent_close = threading.Event()
    self.events = [self.data_ready, self.agent_close]
    self.look_back = None
    self.__open = []
    self.__high = []
    self.__low = []
    self.__close = []
    # Parallel OHLC series, grouped for convenient iteration.
    self.__prices = [self.__open, self.__high, self.__low, self.__close]
    config = utils.load_config()
    self.socket = zmqa.Context().socket(zmq.SUB)
    # sub needs to subscribe to topic
    for ticker in tickers:
        self.socket.setsockopt(zmq.SUBSCRIBE, ticker.encode())
    self.socket.connect(
        f'tcp://127.0.0.1:{config["data_server_broadcast_port"]}')
    self.run_in_fork()
class LocalQueue:
    """A single-machine job queue served over a ZeroMQ REP socket.

    Shell commands are enqueued via POST messages and executed by a pool
    of asyncio worker tasks; GET messages report a job's status.
    """

    # Shared ZMQ context for all instances (created at import time).
    zmq_ctx = aiozmq.Context()

    def __init__(self, address, protocol='tcp', workers=1, secret=None):
        self.logger = logging.getLogger(__name__)
        self.address = protocol + '://' + address
        self.num_workers = workers
        self.secret = secret
        if not secret:
            self.logger.warning('No secret used.')
        self.server = None            # serve_forever task once running
        self.queue = asyncio.Queue()  # pending ShellCommandWrapper jobs
        self.workers = []             # worker tasks once running
        self.stats = {}               # job id -> ProcessStatus

    async def _worker(self, queue):
        """Consume commands from *queue* and run them as shell subprocesses."""
        self.logger.info("worker spawned")
        while True:
            self.logger.debug('waiting for the command')
            command = await queue.get()  # type: ShellCommandWrapper
            self.logger.info('executing %r', command)
            # Capture the process output next to its working directory.
            stdout = open(os.path.join(command.cwd, 'stdout'), 'wb')
            stderr = open(os.path.join(command.cwd, 'stderr'), 'wb')
            proc = await asyncio.create_subprocess_shell(command.cmd,
                                                         stdout=stdout,
                                                         stderr=stderr,
                                                         cwd=command.cwd,
                                                         env=command.env)
            self.stats[command.id].status = JobStatus.RUNNING
            try:
                return_code = await proc.wait()
                self.stats[command.id].return_code = return_code
                self.stats[command.id].status = (JobStatus.COMPLETED
                                                 if return_code == 0 else
                                                 JobStatus.FAILED)
                self.logger.info('%r completed with status %d', command,
                                 return_code)
            except asyncio.CancelledError:
                # Worker cancelled while a process runs: terminate it,
                # record the interruption, and stop this worker.
                self.logger.info('terminating a running process')
                proc.terminate()
                self.stats[command.id].return_code = await proc.wait()
                self.stats[command.id].status = JobStatus.INTERRUPTED
                break
            finally:
                try:
                    proc.kill()
                except OSError:
                    pass  # process already gone
                stdout.close()
                stderr.close()

    async def serve_forever(self):
        """Answer GET/POST requests on the REP socket until cancelled."""
        self.logger.info('starting server')
        socket = self.zmq_ctx.socket(zmq.REP)
        socket.bind(self.address)
        self.logger.info('REP socket bound to %s', self.address)
        while True:
            message = await socket.recv_json()
            if message['method'] == 'GET':
                response = self.do_GET(message)
            elif message['method'] == 'POST':
                response = self.do_POST(message)
            else:
                response = {'ok': False, 'error': 'invalid-method'}
            # BUG FIX: send_json on an asyncio socket returns an awaitable;
            # without `await` the reply was never reliably flushed.
            await socket.send_json(response)

    def do_GET(self, content):
        """Report the status of the job named by content['id']."""
        status = self.stats.get(content['id'])
        if status is None:
            return {
                'ok': True,
                'id': content['id'],
                'status': JobStatus.UNKNOWN,
                'returncode': None
            }
        else:
            return {
                'ok': True,
                'id': content['id'],
                'status': status.status,
                'returncode': status.return_code
            }

    def do_POST(self, content):
        """Queue a new shell command for execution and return its id."""
        cmd = ShellCommandWrapper(cmd=content['cmd'],
                                  cwd=content['cwd'],
                                  env=content.get('env'))
        # Defer the enqueue to the loop so the REP handler returns quickly.
        get_running_loop().call_soon(self.queue.put_nowait, cmd)
        self.stats[cmd.id] = ProcessStatus()
        self.logger.info('queued %r for execution', cmd)
        return {
            'ok': True,
            'id': cmd.id,
            'status': JobStatus.QUEUED,
            'returncode': None
        }

    def close(self):
        """Cancel every worker task and the server task."""
        for worker in self.workers:
            worker.cancel()
        self.server.cancel()

    async def wait_closed(self):
        """Wait until the server and all workers have finished."""
        await asyncio.gather(self.server, *self.workers,
                             return_exceptions=True)

    def run(self, loop=None):
        """Start workers and the server on *loop* and block until Ctrl-C."""
        loop = loop or asyncio.get_event_loop()
        if self.workers:
            raise RuntimeError("Workers are already running")
        self.workers = [
            loop.create_task(self._worker(self.queue))
            for _ in range(self.num_workers)
        ]
        if self.server is not None:
            raise RuntimeError("Server is already running.")
        self.server = loop.create_task(self.serve_forever())
        try:
            loop.run_forever()
        except KeyboardInterrupt:
            pass
        finally:
            try:
                self.close()
                loop.run_until_complete(self.wait_closed())
                loop.run_until_complete(loop.shutdown_asyncgens())
                self.workers.clear()
                self.server = None
            finally:
                loop.close()
def execute(self):
    """Run the job and wait on the listener socket for its result.

    Handles master failover (ping + reconnect), RELOCATE requests (rebind
    the listener elsewhere), then fetches the result data from the
    filesystem, rotating FS nodes on failure.

    :return: the result data, or -1 on unrecoverable errors
    """
    context = zmqa.Context()
    socket = context.socket(zmq.REP)
    print("Client:Binded to the listener port:",
          "tcp://" + self.listener_addr[0] + ":" + self.listener_addr[1])
    result = self._try_execute_task()
    socket.bind("tcp://" + self.listener_addr[0] + ":" +
                self.listener_addr[1])
    assert result != -1, "No se pudo ejecutar el el job"
    # Progress reporter in its own process; daemon so it dies with us.
    p = Process(target=self.show_progress_job)
    p.daemon = True
    p.start()
    print("Client: Cargando....")
    while True:
        # Keeps listening: the master may send info besides the result.
        response = mt.loop_tool(mt.try_to_recv, socket.recv_string, 5)
        if response == -1:
            # Timeout: check whether the master is still alive.
            if self._ping_master():
                continue
            else:
                print("Client: Se cayo el master anterior,busquemos otro")
                self.try_to_connect_master()
                continue
        # Answer first; we reply OK regardless of the message content.
        socket.send_string(str(Message().set_message("OK")))
        if response.message_name == "DONE" or response.message_name == "ERROR":
            if response.message_name == "ERROR":
                print("Client:Error occurred during the operation: ",
                      response.payload["info"])
                socket.close()  # BUG FIX: socket leaked on the error path
                return -1
            print("Listo el resultado")
            break
        if response.message_name == "RELOCATE":
            print("Client: ", "Me Mandaron a hacer Relocate")
            self.master_ping_addr = (response.payload["ping_tracker_ip"],
                                     response.payload["ping_tracker_port"])
            self.listener_addr = (response.payload["answer_ip"],
                                  response.payload["answer_port"])
            socket.close()
            socket = context.socket(zmq.REP)
            socket.bind("tcp://" + self.listener_addr[0] + ":" +
                        self.listener_addr[1])
            continue
        else:
            print("Client:Operation info:", response.payload["info"])
    socket.close()
    result_url = response.payload["result_url"]
    print("Client:The result is in:", result_url, ", inside the filesystem")
    current_tries = 0
    result_data = None
    # Try to collect the results, rotating filesystem nodes on failure.
    while current_tries < self.tries:
        current_tries += 1  # BUG FIX: counter was never incremented -> possible infinite loop
        result_data = self.new_get_data(result_url)
        if result_data == -1:
            print("Hubo bateo al buscar los resultados")
            self.filesystem_addr = self._get_new_filesystem_node()
            if self.filesystem_addr == -1:
                print("Client: No pudimos resolver los resultados")
                return -1
        else:
            break
    return result_data
def new_put_job(self, new_addr, job_id, db_url):
    """Create a job's workspace: package dir marker, status DB, and reply.

    Binds a throwaway REP socket on *new_addr*, waits for the client's OK,
    creates ``./<job_id>/__init__.py`` plus the sqlite status database
    schema, then answers with the job/database urls.

    :param new_addr: (ip, port) to bind the temporary REP socket on
    :param job_id: identifier used as the job's directory name
    :param db_url: path where the sqlite status database is created
    :return: 0 on success, -1 on bind/timeout/protocol errors
    """
    print("Estamos dentro del putjob")
    data_context = zmqa.Context()
    data_socket = data_context.socket(zmq.REP)
    try:
        data_socket.bind('tcp://{}:{}'.format(*new_addr))
    except zmq.error.ZMQError:
        print("FS ZMQERROR")
        return -1
    answer = mt.loop_tool(mt.try_to_recv, data_socket.recv_string, 1)
    if answer == -1:
        data_socket.close()
        print("No me respondieron en el put job")
        return -1
    if answer.message_name != "OK":
        data_socket.close()
        print("Hubo algun error en el putjob")
        return -1
    try:
        # "x" mode: fail with FileExistsError if already created (handled below).
        init_file = open("./" + str(job_id) + "/__init__.py", "x")
        init_file.close()
        db_file = open(db_url, "x")
        db_file.close()
        # Now I'm gonna configure the database
        connection = sql.connect(db_url)
        cursor = connection.cursor()
        # block: one row per data block with its processing state and
        # the worker currently assigned to it.
        cursor.execute('''CREATE TABLE block (block_id text PRIMARY KEY NOT NULL,
                       state text, phase text,worker_ip text,worker_port text)'''
                       )
        # slices_url: slices that belong to each block.
        cursor.execute('''CREATE TABLE slices_url (slice_url text PRIMARY KEY NOT NULL ,
                       block_id text,
                       FOREIGN KEY (block_id) REFERENCES block (block_id)) ''')
        cursor.execute('''CREATE TABLE result_url
                       (result_url text PRIMARY KEY NOT NULL )''')
        # block_result: many-to-many link between blocks and result urls.
        cursor.execute('''CREATE TABLE block_result
                       (block_id text NOT NULL,
                       result_url text NOT NULL,
                       PRIMARY KEY (block_id,result_url) ,
                       FOREIGN KEY (block_id) REFERENCES block (block_id)
                       ON DELETE CASCADE ON UPDATE NO ACTION,
                       FOREIGN KEY (result_url) REFERENCES result_url (result_url)
                       ON DELETE CASCADE ON UPDATE NO ACTION )''')
        # job: single-row table with the job's global status and endpoints.
        cursor.execute('''CREATE TABLE job (job_id text PRIMARY KEY NOT NULL,
                       tracker_ip_ping text, tracker_port_ping text,
                       answer_ip text, answer_port text, status_phase text,
                       map_data_url text, result_url text, job_state text, data_type text)'''
                       )
        connection.commit()
        cursor.close()
        connection.close()
    except FileExistsError:
        # Workspace already present; treat as success and just answer.
        print("FS:Los archivos de inicialización ya existen... no sé cómo")
    data_socket.send_string(
        str(Message().set_message("OK", {
            "job_url": "./" + job_id,
            "database_url": db_url
        })))
    data_socket.close()
    print("Se hizo putjob")
    return 0
def wait_for_results(self, assign_tasks, workers_addrs, put_byte):
    """Wait for DONE answers from workers until every block is finished.

    Binds a REP socket on the tracker address (falling back to a random
    port on bind failure).  On each receive timeout it re-reads the block
    status table, pings the workers that still hold SUBMITTED blocks, and
    reassigns the blocks whose workers stopped answering.

    :param assign_tasks: mapping of current block assignments; updated
        in place when blocks are reassigned
    :param workers_addrs: known worker addresses (refreshed on reassign)
    :param put_byte: forwarded to send_tasks_messages for reassignment
    :return: 0 once all blocks are DONE
    """
    context = zmqa.Context()
    answer_socket = context.socket(zmq.REP)
    try:
        answer_socket.bind('tcp://{}:{}'.format(*self.tracker_addr))
    except zmq.error.ZMQError:
        print("JobTracker: ZMQERROR in wait_for_results")
        # Address busy: fall back to a random free port on the same ip.
        answer_socket.bind('tcp://{}:{}'.format(
            self.tracker_ip, mt.get_available_random_port(self.tracker_ip)))
    print('JobTracker: ',
          "Ahora el tracker espera por respuestas DONE por: ",
          self.tracker_addr)
    print("JobTracker: ", 'Esperando Done de: ', workers_addrs)
    cnt_answers = 0
    while True:
        ans = mt.loop_tool(self._try_to_recv_done, answer_socket)
        if ans is None:
            # Receive timed out: audit block states instead of waiting.
            submitted_filters = [('state', mt.slices_states[1]),
                                 ('phase', self.job_phase)]
            writing_filters = [('state', mt.slices_states[-2]),
                               ('phase', self.job_phase)]
            print(
                "JobTracker: Buscamos los submitted y los writing blocks")
            submitted_blocks_rows = self.status_handler.get_status_rows(
                self.file_sys_addr, 'block', submitted_filters)
            writing_blocks_rows = self.status_handler.get_status_rows(
                self.file_sys_addr, 'block', writing_filters)
            print("JobTracker: ",
                  "Ya recibimos los bloques de submitted y writing")
            if len(submitted_blocks_rows) == 0:
                if len(writing_blocks_rows) == 0:
                    # Nothing submitted and nothing being written -> done.
                    print("JobTracker: ",
                          'Ya todos los bloques estan en DONE')
                    return 0
                continue
            testing_blocks = self._get_blocks_urls_with_blocks_rows(
                submitted_blocks_rows)
            if testing_blocks == 0:
                answer_socket.close()
                return 0
            # Ping the worker assigned to each still-submitted block; keep
            # only the blocks whose workers do NOT answer.
            indexes_to_remove = []
            for i, block in enumerate(testing_blocks):
                block_id = submitted_blocks_rows[i]['block_id']
                worker_assigned_ping = (
                    submitted_blocks_rows[i]['worker_ip'],
                    submitted_blocks_rows[i]['worker_port'])
                # TODO: check why KeyError appears even when no worker died
                print("JobTracker: ",
                      "ESTE ES EL WORKER ASSIGN DE BLOCK_ID ", block_id,
                      " worker: ", worker_assigned_ping)
                print("JobTracker: ", 'Vamos a hacerle ping a :',
                      worker_assigned_ping, ' con addr ping: ',
                      worker_assigned_ping)
                answer = mt.loop_tool(
                    mt.try_to_send_message, mt.send_message,
                    mt.Message().set_message("PING"), worker_assigned_ping,
                    lambda: print("JobTracker: ", 'No me respondio: ',
                                  worker_assigned_ping), 1)
                if answer is not None:
                    print(
                        "JobTracker: Ya se que este worker sigue pinchando: ",
                        worker_assigned_ping)
                    indexes_to_remove.append(i)
                else:
                    print("JobTracker: Parece que murio este worker: ",
                          worker_assigned_ping)
                    print("JobTracker: ", 'buscamos otro worker')
            print("JobTracker: ",
                  'Ya le hicimo ping a todos, pacemos a reasignar')
            testing_blocks = [
                testing_blocks[i] for i in range(len(testing_blocks))
                if i not in indexes_to_remove
            ]
            print(
                "JobTracker: ",
                'ESTOS SON LOS BLOQUES QUE QUEDAN(MOD) En ESTADO SUBMITTED: ',
                testing_blocks, ' QUE NO ME RESPONDEN')
            if len(testing_blocks) == 0:
                continue
            # Reassign the orphaned blocks to currently alive workers and
            # merge the new assignments into the caller's mapping.
            workers_addrs, workers_status = self._getting_workers()
            assign_tasks2 = self.send_tasks_messages(testing_blocks,
                                                     put_byte,
                                                     workers_status,
                                                     registered_block=True)
            for x, y in assign_tasks2.items():
                assign_tasks[x] = y
            continue
        answer = mt.Message().get_message(ans)
        if answer.message_name == 'ERROR':
            # self.send_error_message_to_addr(answer.payload,self.client_addr)
            print("JobTracker: ", "Error por parte del worker: ",
                  answer.payload['info'])
        elif answer.message_name == "DONE":
            worker_addr = answer.payload['worker_addr']
            worker_addr = (worker_addr[0], worker_addr[1])
            print('JobTracker: ',
                  "Recibi un Done de: {}:{}".format(*worker_addr))
            message = mt.Message().set_message("OK")
            print("JobTracker: ", "Sending OK to: ", worker_addr)
            answer_socket.send_string(str(message))
            cnt_answers += 1
async def _run(self):
    """
    asyncio context and sockets have to be set up within a coroutine.
    Otherwise, they won't function as expected
    Also, if we declare asyncio context outside a coroutine, the program
    won't exit smoothly

    Collects all methods flagged with ``__job__``, groups them by priority,
    opens the ZMQ sockets each group declares via ``__sockets__``, runs the
    group's jobs concurrently, and closes the sockets afterwards.
    """
    # ---------- Retrieve all schedulable jobs and run according to priorities ----------
    all_jobs = [
        job for _, job in inspect.getmembers(self, inspect.ismethod)
        if getattr(job, '__job__', False)
    ]
    job_groups = {}
    for job in all_jobs:
        job_groups.setdefault(job.__job__, []).append(job)
    # group jobs by priority
    ordered_groups = sorted(job_groups.keys())
    for group in ordered_groups:
        self.logger.info('')
        self.logger.info(
            f'========== {Worker.stage_map[int(group)]} =========='
        )  # log stage
        jobs_per_group = job_groups[group]
        # get all socket specs
        socket_list = utils.unique(
            utils.flatten(
                list(job.__sockets__.values()) for job in jobs_per_group))
        # check if specs are unique
        minimal_sockets = utils.unique(
            spec.split('|')[0] for spec in socket_list)
        if len(minimal_sockets) < len(socket_list):
            raise ValueError(
                f'Duplicated socket specification: {socket_list}')
        # ---------- Initialize sockets ----------
        context = azmq.Context()
        sockets = {}
        for spec in socket_list:
            port_name, protocol = spec.split('|')
            port = self.port_map.get(port_name, None)
            if port == -1:
                # Port explicitly disabled: the job receives None.
                sockets[spec] = None
            elif port is not None:
                socket = context.socket(Worker.protocol_map[protocol])
                address = f'tcp://127.0.0.1:{port}'
                # this is required for using timeout on REQ. Otherwise, socket blocks forever
                if protocol == 'REQ':
                    # noinspection PyUnresolvedReferences
                    socket.setsockopt(zmq.LINGER, 0)
                # default bind / connect classifications. May need extension in the future?
                if protocol in ['PUSH', 'SUB', 'REQ']:
                    socket.connect(address)
                    self.logger.info(f'{spec}: contacting {address}')
                elif protocol in ['PULL', 'PUB', 'REP']:
                    socket.bind(address)
                    self.logger.info(f'{spec}: listening on {address}')
                else:
                    raise ValueError(
                        f'Protocol {protocol} is not in any bind/connect category'
                    )
                sockets[spec] = socket
            else:
                raise ValueError(f'Port {port_name} not defined')
        # ---------- Start jobs ----------
        try:
            awaitables = []
            for job in jobs_per_group:
                inputs = {
                    k: sockets[v]
                    for k, v in job.__sockets__.items()
                }
                awaitables.append(job(**inputs))
            await asyncio.gather(*awaitables)
        finally:
            for socket in sockets.values():
                # BUG FIX: specs whose port is -1 map to None; calling
                # None.close() raised AttributeError in this finally block
                # and masked the real exception.
                if socket is not None:
                    socket.close()
        if group == 2:
            self.logger.info('\n========== Shutting Down ==========')
            await self.shutdown()
from asyncio import get_running_loop, sleep, Queue as AsyncQueue

import aioreactive as rx
from contextvars import ContextVar

from .actions import PrintAction, SendAction
from .percepts import MessagePercept, ResultPercept
from .utils import eprint, anext, aiter
from .actions import GenericAction, wrap_coroutine
from .actions import act_context
from .percepts import percepts_context
from .fipa_acl import AgentIdentifier

# NOTE(review): `zmq` is not imported in this visible chunk -- presumably
# imported earlier in the file; confirm, otherwise this fails at import time.
context = zmq.Context()

# Per-context bookkeeping for agent runs.
start_time = ContextVar("start_time")
current_agent = ContextVar("current_agent")


def local_address(port):
    """Return the loopback TCP endpoint string for *port*."""
    return f"tcp://localhost:{port}"


def own_address(port):
    """Return this process's own endpoint for *port* (loopback for now)."""
    # TODO: figure out how to find this when sending over the network.
    return local_address(port)


async def loop_time():
    """Return the running event loop's monotonic clock value."""
    loop = get_running_loop()
    return loop.time()