def recv_message(self, socket, flags=0, validate=True, timeout=None):
    '''Receive a message object from the given socket, using the given flags.
    Message validation is performed if ``validate`` is true. If ``timeout`` is
    given, then it is the number of milliseconds to wait prior to raising a
    ZMQWMTimeout exception. ``timeout`` is ignored if ``flags`` includes
    ``zmq.NOBLOCK``.'''

    if timeout is None or flags & zmq.NOBLOCK:
        message = socket.recv_pyobj(flags)
    else:
        poller = zmq.Poller()
        poller.register(socket, zmq.POLLIN)
        try:
            poll_results = dict(poller.poll(timeout=timeout))
            if socket in poll_results:
                message = socket.recv_pyobj(flags)
            else:
                raise ZMQWMTimeout('recv timed out')
        finally:
            poller.unregister(socket)

    if self._super_debug:
        self.log.debug('received {!r}'.format(message))
    if validate:
        with self.message_validation(message):
            self.validate_message(message)
    return message
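# A standalone sketch of the same poll-then-recv timeout pattern used by
# recv_message above, without the class plumbing. The ZMQWMTimeout class is
# replaced here by the built-in TimeoutError; everything else mirrors the
# snippet above.
import zmq

def recv_pyobj_timeout(socket, timeout_ms):
    poller = zmq.Poller()
    poller.register(socket, zmq.POLLIN)
    try:
        # poll() returns the sockets that became readable within timeout_ms.
        if socket in dict(poller.poll(timeout=timeout_ms)):
            return socket.recv_pyobj()
        raise TimeoutError('recv timed out after {} ms'.format(timeout_ms))
    finally:
        poller.unregister(socket)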
def request_data():
    """Request data and push it to the correlation graph."""
    socket.send_string("Request_%s" % self.plotName.replace(' ', '_'))
    nowStr = time.strftime("%b %d %Y %H:%M:%S", time.localtime())
    print("correlation plot %s requested data at %s, plot %g seconds "
          % (self.plotName, nowStr, time.time() - self.plotStartTime))
    print('*** waiting')
    data_dict = socket.recv_pyobj()
    print('*** received')

    pdSeriesDict = {}  # FIX ME: why this?
    # Get time from data_dict
    print("total time data: ", data_dict['event_time'][0],
          data_dict['event_time'][-1])
    timeData = data_dict['event_time'][-self.number_of_events:]
    modTimeData = timeData[:, 0] + (timeData[:, 1] * 1e-6).astype(int) * 1e-3
    print("got data starting at: ", modTimeData[0], len(modTimeData))

    for key in self.data.keys():
        pdSeriesDict[key] = pd.Series(
            data_dict[key][-self.number_of_events:], index=modTimeData)

    full_frame = pd.DataFrame(pdSeriesDict)
    self.streamData.event(df=full_frame)
def update_data(self):
    while True:
        current_time = int(round(time.time() * 1000))
        diff = max(0, self.data_next_refresh - current_time)
        events = dict(self.poller.poll(diff))
        if not events:
            timestamp = self.data_next_refresh / 1000
            self.data_next_refresh += self.data_period
            break
        for socket in events:
            if events[socket] != zmq.POLLIN:
                continue
            message = socket.recv_pyobj()
            reference, timestamp, data = message
            self.buffer[reference] = data

    print(self.data_next_refresh)
    for identifier, sensor in self.sensors.items():
        reference = self.references[identifier]
        if reference not in self.buffer:
            continue
        if isinstance(sensor, LineSensor):
            sensor.push_data(timestamp, self.buffer[reference])
        else:
            sensor.push_data(self.buffer[reference])
def tell_done(url, fname):
    """Tell the database that the learner has reached its goal.

    Parameters
    ----------
    url : str
        The url of the database manager running via
        `adaptive_scheduler.server_support.manage_database`.
    fname : str
        The filename of the learner that is done.
    """
    log.info("goal reached! 🎉🎊🥳")
    with ctx.socket(zmq.REQ) as socket:
        socket.connect(url)
        socket.send_pyobj(("stop", fname))
        log.info("sent stop signal", fname=fname)
        socket.recv_pyobj()  # Needed because of the REQ/REP socket type.
def worker_routine(self, worker_url, context: zmq.Context = None):
    context = context or zmq.Context.instance()
    socket = context.socket(zmq.REP)
    socket.connect(worker_url)
    while True:
        message = socket.recv_pyobj()
        socket.send_string("ok")
        self.qu.put(message)
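# A minimal sketch of the client side that would talk to worker_routine above,
# assuming the same REQ/REP pattern. The function name and worker_url are
# placeholders, not part of the original code: it sends one pickled object and
# waits for the "ok" acknowledgement that worker_routine replies with.
import zmq

def send_to_worker(worker_url, payload):
    context = zmq.Context.instance()
    socket = context.socket(zmq.REQ)
    socket.connect(worker_url)
    socket.send_pyobj(payload)    # pickled by pyzmq's send_pyobj
    reply = socket.recv_string()  # worker_routine answers with "ok"
    socket.close()
    return reply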
def get_learner(url, learners, fnames):
    """Get a learner from the database running at `url`.

    Parameters
    ----------
    url : str
        The url of the database manager running via
        `adaptive_scheduler.server_support.manage_database`.
    learners : list of `adaptive.BaseLearner` instances
        List of `learners` corresponding to `fnames`.
    fnames : list
        List of `fnames` corresponding to `learners`.

    Returns
    -------
    learner, fname
        The learner that was chosen and the filename it corresponds to.
    """
    job_id = get_job_id()
    log.info("trying to get learner", job_id=job_id)
    with ctx.socket(zmq.REQ) as socket:
        socket.connect(url)
        socket.send_pyobj(("start", job_id))
        log.info("sent start signal")
        reply = socket.recv_pyobj()
        log.info("got reply", reply=str(reply))
        if reply is None:
            msg = f"No learners to be run for {job_id}."
            log.exception(msg)
            raise RuntimeError(msg)
        elif isinstance(reply, Exception):
            log.exception("got an exception")
            raise reply
        else:
            fname = reply
            log.info("got fname")

    def maybe_lst(fname):
        if isinstance(fname, tuple):
            # TinyDB converts tuples to lists
            fname = list(fname)
        return fname

    try:
        learner = next(l for l, f in zip(learners, fnames) if maybe_lst(f) == fname)
    except StopIteration:
        msg = "Learner with this fname doesn't exist in the database."
        log.exception(msg)
        raise UserWarning(msg)

    log.info("picked a learner")
    return learner, fname
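# Not adaptive_scheduler's actual manage_database, just a minimal hypothetical
# REP-side loop answering the ("start", job_id) and ("stop", fname) requests
# that get_learner and tell_done above send. The url and the fname hand-out
# strategy here are illustrative assumptions.
import zmq

def _serve_database(url, fnames):
    context = zmq.Context.instance()
    socket = context.socket(zmq.REP)
    socket.bind(url)
    todo = list(fnames)
    while True:
        request = socket.recv_pyobj()
        if request[0] == "start":
            # Hand out the next free fname, or None when nothing is left.
            socket.send_pyobj(todo.pop(0) if todo else None)
        elif request[0] == "stop":
            # tell_done() only needs *some* reply because of the REQ/REP pattern.
            socket.send_pyobj(None)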
def request_data():
    """ Push data to correlation graph """
    socket.send_string("Request_%s" % self.plotName.replace(' ', '_'))
    nowStr = time.strftime("%b %d %Y %H:%M:%S", time.localtime())
    print("correlation plot %s requested data at %s, plot %g seconds "
          % (self.plotName, nowStr, time.time() - self.plotStartTime))
    data_dict = socket.recv_pyobj()

    pdSeriesDict = {}  # FIX ME: why this?
    # Get time from data_dict
    print("total time data: ", data_dict['event_time'][0],
          data_dict['event_time'][-1])
    timeData = data_dict['event_time'][-self.number_of_events:]
    modTimeData = timeData[:, 0] + (timeData[:, 1] * 1e-6).astype(int) * 1e-3
    print("got data starting at: ", modTimeData[0], len(modTimeData))

    # get all relevant fields
    scanVarName = ''
    for key in data_dict.keys():
        if key.find('scan') >= 0:
            scanVarName = key

    i0 = data_dict[self.i0var]
    sig = data_dict[self.sigvar]
    laser = data_dict['lightStatus__laser']
    xray = data_dict['lightStatus__xray']

    i0 = i0[xray > 0]
    sig = sig[xray > 0]
    i0_on = i0[laser > 0]
    i0_off = i0[laser == 0]
    sig_on = sig[laser > 0]
    sig_off = sig[laser == 0]

    for key in self.data.keys():
        pdSeriesDict[key] = pd.Series(
            data_dict[key][-self.number_of_events:], index=modTimeData)

    full_frame = pd.DataFrame(pdSeriesDict)
    self.streamData.event(df=full_frame)
def q_worker():
    context = zmq.Context()
    socket = context.socket(zmq.REP)  # server
    ip = get_ip()
    print(ip)
    # Only works with the LAN IP or a wildcard:
    socket.bind('tcp://{}:26231'.format(ip))
    while tqw_alive:
        while q.qsize() < q_target:
            via_rpi3 = socket.recv_pyobj()
            socket.send_pyobj(int(q.qsize()))
            if via_rpi3 == 'Weiter!':  # 'Weiter!' = 'continue!'
                break
            ts, bmsg = via_rpi3
            q.put((ts, bmsg))
        time.sleep(fpsk_min * 3.0)
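# A minimal sketch of the sender that q_worker above expects (the original
# sender runs on a Raspberry Pi; host, port default, and the function name are
# placeholders). It sends a (timestamp, bytes) tuple and receives the server's
# current queue size back.
import time
import zmq

def push_frame(bmsg, host='192.168.0.10', port=26231):
    context = zmq.Context.instance()
    socket = context.socket(zmq.REQ)
    socket.connect('tcp://{}:{}'.format(host, port))
    socket.send_pyobj((time.time(), bmsg))
    qsize = socket.recv_pyobj()  # q_worker replies with int(q.qsize())
    socket.close()
    return qsize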
FilterVar = setupDict[plotName]['FilterVar']
FilterVarMin = setupDict[plotName]['FilterVarMin']
FilterVarMax = setupDict[plotName]['FilterVarMax']

context = zmq.Context()
socket = context.socket(zmq.SUB)
socket.connect("tcp://%s:%d" % ('daq-xpp-mon05', 5000))
socket.setsockopt(zmq.SUBSCRIBE, b"")
# if socket.poll(timeout=0):

nrep = 1
while nrep > 0:
    socket.send_string("Request_for test")
    nowStr = time.strftime("%b %d %Y %H:%M:%S", time.localtime())
    print("test requested data at %s " % (nowStr))
    data_dict = socket.recv_pyobj()
    print("got data")

    data = {'scanSteps': []}
    data['scanValues_on'] = []
    scanVarName = 'delay'
    for key in data_dict.keys():
        if key.find('scan') >= 0:
            scanVarName = key
    # No scan variable found: assume it's a delay scan.
    if scanVarName == 'delay':
        if np.std(data_dict['enc__lasDelay']) < 1e-3:
            print('no scan, return')
            sys.exit()
def send_msg(msg, socket=None, server=None, querry_timeout=5, info=0):
    """Send ``msg`` to the server and return its reply.

    params:
        server: e.g. '210.45.117.30' or 'qtg7501'; if the latter is used, an
            ip/hostname pair should be added to /etc/hosts
        querry_timeout: I once tried to use the stopit module to give recv a
            timeout, but it did not work; the problem seems to be that the
            background thread is never closed, refer to
            https://github.com/zeromq/pyzmq/issues/132
    """
    if socket is None:
        assert server is not None
        context, socket = conn_server(server, info=info)

    socket.setsockopt(zmq.LINGER, 0)  # this is needed or else the timeout won't work
    socket.send_pyobj(msg)

    # use poll for timeouts:
    poller = zmq.Poller()
    poller.register(socket, zmq.POLLIN)
    if poller.poll(querry_timeout * 1000):  # timeout in milliseconds
        reply = socket.recv_pyobj()
    else:
        # Not able to reach the server within querry_timeout.
        reply = 'not_reachable'
    return reply
# Announce that the service is active.
nombre_equipo = str(socket.gethostname())
print(nombre_equipo, type(nombre_equipo))
reportJSON = report_service.reportService(op, nombre_equipo, port)
print(reportJSON, type(reportJSON))
# msm = "+" + "," + nombre_equipo + "," + "8001"
suma.send_pyobj(reportJSON)
acuse = suma.recv_string()
print(acuse)

# Context for the reply (REP) socket.
context_rep = zmq.Context()
socket = context_rep.socket(zmq.REP)
socket.bind("tcp://*:" + port)
message = socket.recv_pyobj()

with open('data.json', 'r') as file:
    datas = json.load(file)
operando1 = datas.get("operandouno")
print(operando1)

with open('data.json', 'r') as file:
    datas = json.load(file)
operando2 = datas.get("operandodos")
print(operando2)
def queue(runnable, args=None, kwargs=None, querry=False, host=None, port=None,
          tunnel=False, querry_timeout=5, tunnel_server=None, kill_server=False):
    """Return the result of running the task *runnable* with the given arguments.

    params:
        host: e.g. '210.45.117.30' or 'qtg7501'; if the latter is used, an
            ip/hostname pair should be added to /etc/hosts
        querry: query whether the server is available
        querry_timeout: I once tried to use the stopit module to give recv a
            timeout, but it did not work; the problem seems to be that the
            background thread is never closed, refer to
            https://github.com/zeromq/pyzmq/issues/132
    """
    # host = '222.195.73.70'
    # port = 90900
    host = host if host is not None else '127.0.0.1'
    port = port if port is not None else 90900
    args = args if args is not None else ()
    kwargs = kwargs if kwargs is not None else {}

    context = zmq.Context()
    socket = context.socket(zmq.REQ)
    url = 'tcp://{}:{}'.format(host, port)
    if not tunnel:
        # One should either use connect or tunnel_connection, not both.
        socket.connect(url)
    else:
        # zmq.ssh.tunnel_connection(socket, url, "myuser@remote-server-ip")
        # Issue: tunnel seems to restrict the port range; five-digit ports
        # such as 90900 cannot be used.
        zmq.ssh.tunnel_connection(socket, url, tunnel_server)
        # print('tunnel succeeded: {}'.format(url))

    if kill_server:
        socket.send_pyobj({'header': 'stop'})
        rep = server_status = socket.recv_pyobj()
        print('REP: %s' % (rep, ))
        return

    results = None
    status = 'refuse'
    if querry:
        socket.setsockopt(zmq.LINGER, 0)  # needed or else the timeout won't work
        num_of_threads = None
        num_of_memory = None
        if len(args) > 0:
            # In main.Main.run_many_dist, NUM_OF_THREADS is passed in args[0].
            if isinstance(args[0], dict):
                num_of_threads = args[0].get('NUM_OF_THREADS')
                num_of_memory = args[0].get('num_of_memory', None)
        socket.send_pyobj({
            'header': 'querry',
            'what': 'is_available',
            'num_of_threads': num_of_threads,  # requested resources
            'num_of_memory': num_of_memory,
        })

        # use poll for timeouts:
        poller = zmq.Poller()
        poller.register(socket, zmq.POLLIN)
        if poller.poll(querry_timeout * 1000):  # timeout in milliseconds
            server_status = socket.recv_pyobj()
        else:
            # Not able to reach the server within querry_timeout; sometimes
            # querry_timeout needs to be enlarged for the connection to succeed.
            server_status = 'not_reachable'
            status = 'conn timeout'
    else:
        server_status = 'available'

    if server_status == 'available':
        # runnable_string = cloud.serialization.cloudpickle.dumps(runnable)
        # socket.send_pyobj({'header': 'run',
        #                    'runnable_string': runnable_string,
        #                    'args': args,
        #                    'kwargs': kwargs})
        msg = pack_runnable_msg(runnable, args, kwargs)
        socket.send_pyobj(msg)
        results = socket.recv_pyobj()
        status = 'done'
    else:
        if server_status != 'not_reachable':
            status += ' %s' % (server_status, )

    # These are not necessary, but still good practice:
    socket.close()
    context.term()
    return status, results
def conn_server(server, info=0):
    """Connect a REQ socket to the given server and return ``(context, socket)``.

    params:
        server: e.g. '210.45.117.30' or 'qtg7501'; if the latter is used, an
            ip/hostname pair should be added to /etc/hosts
    """
    server_info = resolve_server(server)
    host = server_info['host']
    port = server_info['port']
    tunnel = server_info['tunnel']
    tunnel_server = server_info['tunnel_server']
    if info > 0:
        print_vars(vars(), ['server_info'])

    context = zmq.Context()
    socket = context.socket(zmq.REQ)
    url = 'tcp://{}:{}'.format(host, port)
    if not tunnel:
        # One should either use connect or tunnel_connection, not both.
        socket.connect(url)
    else:
        # zmq.ssh.tunnel_connection(socket, url, "myuser@remote-server-ip")
        # Issue: tunnel seems to restrict the port range; five-digit ports
        # such as 90900 cannot be used.
        zmq.ssh.tunnel_connection(socket, url, tunnel_server)
        # print('tunnel succeeded: {}'.format(url))

    # These are not necessary, but still good practice:
    # socket.close()
    # context.term()
    return context, socket
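# A usage sketch tying conn_server and send_msg above together. 'qtg7501' is
# the placeholder hostname from the docstrings and must be known to
# resolve_server; the availability query mirrors the message that queue()
# sends, so whether the server accepts it depends on that same protocol.
context, socket = conn_server('qtg7501', info=1)
reply = send_msg({'header': 'querry', 'what': 'is_available',
                  'num_of_threads': None, 'num_of_memory': None},
                 socket=socket)
print('server replied:', reply)
socket.close()
context.term()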
def add_face():
    # json_path = '/mnt/code/face/face_tool/face_666.json'
    json_path = Code.face_path
    if not request.json or 'personInfo' not in request.json:
        return jsonify({'code': 205, 'msg': 'invalid http request parameters'})
    add_list = request.json.get('personInfo')

    zmq_ip = Code.localIp
    zmq_port = Code.zmq_port
    try:
        context = zmq.Context()
        socket = context.socket(zmq.REQ)
        socket.connect("tcp://%s:%s" % (zmq_ip, zmq_port))
        print('successfully connected to the server ip:%s , port:%s' % (zmq_ip, zmq_port))
    except Exception:
        print('failed to connect to the server')
        return jsonify({'code': 204, 'msg': 'internal program error'})

    person_list = []
    # Counters for successful and failed pictures, and the failed paths.
    total_pic = 0
    success_pic = 0
    fail_pic = 0
    fail_list = []
    for person in add_list:
        try:
            ID = person.get('ID')
            name = person.get('name')
            tag = person.get('tag')
            path_list = person.get('filePath')
            print('adding: %s...' % name)
            if not ID or not name or not path_list:
                # print('incomplete person information')
                continue
            for path in path_list:
                total_pic += 1
                start = time.time()
                # Path conversion.
                img_path = '/extstore/mah' + path
                # img_path = path
                img = cv2.imread(img_path)
                # Send a zmq object to run face detection.
                pyobj = Munch()
                pyobj.img = img
                pyobj.type = 'feature_detection'
                socket.send_pyobj(pyobj)
                zmq_result = socket.recv_pyobj().result
                # Only add to the database if exactly one face was detected.
                if len(zmq_result.get('results')) == 1:
                    front_face = zmq_result.get('results')[0].get('front_face')
                    if front_face:
                        freatrue = zmq_result.get('results')[0].get('feature')
                        # Assemble the information into a dictionary.
                        person_info = {}
                        person_info["ID"] = ID
                        person_info["name"] = name
                        person_info["tag"] = tag
                        person_info["path"] = path
                        person_info["freatrue"] = freatrue
                        # Append to person_list.
                        person_list.append(person_info)
                        success_pic += 1
                        end = time.time()
                        print('adding one face took:', end - start)
                    else:
                        print('side-facing image, does not meet the database requirements')
                        fail_pic += 1
                        fail_list.append(path)
                elif len(zmq_result.get('results')) == 0:
                    fail_pic += 1
                    fail_list.append(path)
                    continue
                else:
                    fail_pic += 1
                    fail_list.append(path)
                    continue
        except Exception:
            continue

    # Write person_list into the face database.
    if not os.path.isfile(json_path):
        with open(json_path, 'w') as f:
            f.write("[]")
    with open(json_path, 'r', encoding='utf-8-sig') as f:
        info_list = json.load(f)
    for i in person_list:
        info_list.append(i)
    # Write the data.
    with open(json_path, 'w') as f:
        json.dump(info_list, f, ensure_ascii=False)

    print('total_pic:', total_pic)
    print('success_pic', success_pic)
    print('person_list:', len(person_list))
    print('fail_pic:', fail_pic)
    print('fail_list', fail_list)
    return jsonify({
        "code": 200,
        "msg": "faces added successfully",
        "detail": {
            "total_add_number": total_pic,
            "success_add_number": success_pic,
            "fail_add_number": fail_pic,
            "fail_add_picture": fail_list,
        },
    })
FLAGS = gflags.FLAGS
FLAGS(sys.argv)

context = zmq.Context()
socket = context.socket(zmq.REP)
socket.bind(FLAGS.addr)

res = FLAGS.resolution
filler = Filler(res, gpu=FLAGS.gpu)
prefilled_img = np.zeros((res, res, 3), dtype=np.uint8)

while True:
    msg = socket.recv_pyobj()
    ret = None
    if msg[0] == 'exit':
        socket.send_pyobj('ok')
        break
    elif msg[0] == 'args':
        opengl_arr, imgs, depths, pose, poses, fov = msg[1:]
        render_pc(cuda_pc, FLAGS.gpu, FLAGS.resolution, opengl_arr, imgs,
                  depths, pose, poses, prefilled_img, fov)
        ret = filler.fill(prefilled_img, opengl_arr)
    success = False
    while not success:
        try:
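# A minimal client sketch for the render/fill REP server above, showing only
# the 'exit' handshake (the default address is a placeholder for whatever
# FLAGS.addr was set to; the 'args' request needs the actual render inputs and
# is not shown).
import zmq

def shutdown_filler(addr='tcp://127.0.0.1:5555'):
    context = zmq.Context.instance()
    socket = context.socket(zmq.REQ)
    socket.connect(addr)
    socket.send_pyobj(['exit'])  # the server matches on msg[0] == 'exit'
    assert socket.recv_pyobj() == 'ok'
    socket.close()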
def face_recognition():
    if not request.json or 'taskGUID' not in request.json:
        return jsonify({'code': 205, 'msg': 'invalid http request parameters'})
    task_guid = request.json.get('taskGUID')
    file_path_list = request.json.get('filePath')
    url_list = request.json.get('url')
    imgbase64 = request.json.get('imgBase64')
    if not file_path_list and not url_list and not imgbase64:
        return jsonify({'code': 205, 'msg': 'invalid http request parameters'})

    zmq_ip = Code.localIp
    zmq_port = Code.zmq_port
    try:
        context = zmq.Context()
        socket = context.socket(zmq.REQ)
        socket.connect("tcp://%s:%s" % (zmq_ip, zmq_port))
        print('successfully connected to the server ip:%s , port:%s' % (zmq_ip, zmq_port))
    except Exception:
        print('failed to connect to the server')
        return jsonify({'code': 204, 'msg': 'internal program error'})

    face_list = []
    if file_path_list:
        for img_path in file_path_list:
            try:
                img = cv2.imread(img_path)
            except Exception:
                continue
            pyobj = Munch()
            pyobj.img = img
            pyobj.type = 'face_recognition'
            socket.send_pyobj(pyobj)
            result_dict = socket.recv_pyobj().result.get('reslut')
            try:
                del result_dict['queryurl']
                del result_dict['msg']
                del result_dict['code']
            except Exception:
                pass
            face_list.append(result_dict)
    elif url_list:
        for url in url_list:
            try:
                capture = cv2.VideoCapture(url)
                if capture.isOpened():
                    ret, img = capture.read()
            except Exception:
                continue
            pyobj = Munch()
            pyobj.img = img
            pyobj.type = 'face_recognition'
            socket.send_pyobj(pyobj)
            result_dict = socket.recv_pyobj().result.get('reslut')
            try:
                del result_dict['queryurl']
                del result_dict['msg']
                del result_dict['code']
            except Exception:
                pass
            face_list.append(result_dict)
    elif imgbase64:
        img_data = base64.b64decode(imgbase64)
        random_str = ''.join(random.sample('abcdefghijklmnopqrstuvwxyz0123456789', 8))
        picture_dir = os.getcwd()
        if not os.path.isdir(os.getcwd() + '/picture_dir'):
            os.makedirs(os.getcwd() + '/picture_dir')
        img_path = os.getcwd() + '/picture_dir/' + random_str + '.jpg'
        with open(img_path, "wb") as f2:
            f2.write(img_data)
        img = cv2.imread(img_path)
        pyobj = Munch()
        pyobj.img = img
        pyobj.type = 'face_recognition'
        socket.send_pyobj(pyobj)
        result_dict = socket.recv_pyobj().result.get('reslut')
        try:
            del result_dict['queryurl']
            del result_dict['msg']
            del result_dict['code']
        except Exception:
            pass
        face_list.append(result_dict)
        os.remove(img_path)

    return jsonify({'taskGUID': task_guid, 'code': 200, 'result_list': face_list})
def MasterProcess_func(process_ID, undertaker_table, file_names_tables, availability_table):
    print("master process no. " + str(process_ID) + " started")
    # process_ID = int(sys.argv[1])
    # IP_table = shared_memory.SharedMemory(name="IP_table")  # connect to shared memory
    port = str(4000 + process_ID)  # port for receiving requests
    context = zmq.Context()
    socket = context.socket(zmq.REP)
    socket.bind("tcp://%s:%s" % (get_ip_address(), port))  # bind the server
    # socket_datakeeper = context.socket(zmq.PULL)
    # socket.bind("tcp://%s:%s" % (get_ip_address(), port))
    print(len(file_names_tables), file_names_tables)
    print(len(availability_table), availability_table)

    while True:
        # Wait for a new request.
        print("#####################################################################################")
        print("waiting for new request")
        request_type, file_name = socket.recv_pyobj()
        print(str(process_ID) + " process received a request of type :" + request_type)
        print(file_name)
        print(file_names_tables, file_name)
        # Search for the file name in all data keepers.
        IP_return_list = find_file(file_names_tables, file_name)

        if request_type == "upload":
            print("IP_return_list : ", IP_return_list)
            if len(IP_return_list) != 0:
                socket.send_pyobj("error : file already uploaded before")
                print("master response : error // file already uploaded before")
            else:
                # Upload sequence.
                message = upload_handler(availability_table)
                print("master response :" + message + " is free to upload to")
                socket.send_pyobj(message)
                # Wait for the receive from the data keeper, send success to the
                # client, then free the data keeper -> set IPport in
                # availability_table to True.
        elif request_type == "download":
            print("IP_return_list : ", IP_return_list)
            if len(IP_return_list) != 0:
                # Download sequence.
                message = download_handler(availability_table, IP_return_list)
                print("master response :" + str(len(message)) + " are free to download from")
                socket.send_pyobj(message)  # send an array of free IP:Port to the client
            else:
                socket.send_pyobj("error : file not found in any data keeper")
                print("master response : error // file not found in any data keeper")
        elif request_type == "replyDownload":
            availability_table[file_name] = False
            print(file_name + " is taken now..")
            socket.send_pyobj("Fol 3alik ya client")
        elif request_type == "dataKeeperSuccess":
            IPport, oldrequest, filedownloaded = file_name
            print(file_name)
            availability_table[IPport] = True
            if oldrequest == "upload":
                IP, port = IPport.split(":")
                templist = file_names_tables[IP]
                templist.append(filedownloaded)
                file_names_tables[IP] = templist
                # file_names_tables[IP].append(filedownloaded)
                print("file_names_tables[IP] = ", file_names_tables[IP])
            # TODO : send a success signal to the client.
            socket.send_pyobj("Fol 3alik ya data keeper")
        else:
            socket.send_pyobj("error // request type not known")
            print("master response : error // request type not known")
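# A minimal client sketch for MasterProcess_func above (the master address and
# the helper name are placeholders; port 4000 corresponds to process_ID 0).
# The request format mirrors the (request_type, file_name) tuple the master
# unpacks, and the reply is either an error string or a list of free IP:Port
# entries.
import zmq

def ask_master(request_type, file_name, master_addr='tcp://127.0.0.1:4000'):
    context = zmq.Context.instance()
    socket = context.socket(zmq.REQ)
    socket.connect(master_addr)
    socket.send_pyobj((request_type, file_name))
    reply = socket.recv_pyobj()
    socket.close()
    return reply

# e.g. ask_master("download", "movie.mp4") or ask_master("upload", "movie.mp4")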
def get_learner(
    learners: List[BaseLearner],
    fnames: List[str],
    url: str,
    log_fname: str,
    job_id: str,
    job_name: str,
) -> Tuple[BaseLearner, str]:
    """Get a learner from the database running at `url`; the learner's process
    will be logged in `log_fname` and run under `job_id`.

    Parameters
    ----------
    learners : list of `adaptive.BaseLearner` instances
        List of `learners` corresponding to `fnames`.
    fnames : list
        List of `fnames` corresponding to `learners`.
    url : str
        The url of the database manager running via
        `adaptive_scheduler.server_support.manage_database`.
    log_fname : str
        The filename of the log-file. Should be passed in the job-script.
    job_id : str
        The job_id of the process of the job. Should be passed in the job-script.
    job_name : str
        The name of the job. Should be passed in the job-script.

    Returns
    -------
    learner, fname
        The learner that was chosen and its filename.
    """
    _add_log_file_handler(log_fname)
    log.info(
        "trying to get learner", job_id=job_id, log_fname=log_fname, job_name=job_name
    )
    with ctx.socket(zmq.REQ) as socket:
        socket.connect(url)
        socket.send_pyobj(("start", job_id, log_fname, job_name))
        log.info("sent start signal, timeout after 10s.")
        socket.setsockopt(zmq.RCVTIMEO, 10_000)  # timeout after 10s
        reply = socket.recv_pyobj()
        log.info("got reply", reply=str(reply))
        if reply is None:
            msg = "No learners to be run."
            exception = RuntimeError(msg)
            log_exception(log, msg, exception)
            raise exception
        elif isinstance(reply, Exception):
            log_exception(log, "got an exception", exception=reply)
            raise reply
        else:
            fname = reply
            log.info("got fname")

    def maybe_lst(fname: Union[Tuple[str], str]):
        if isinstance(fname, tuple):
            # TinyDB converts tuples to lists
            fname = list(fname)
        return fname

    try:
        learner = next(l for l, f in zip(learners, fnames) if maybe_lst(f) == fname)
    except StopIteration:
        msg = "Learner with this fname doesn't exist in the database."
        exception = UserWarning(msg)
        log_exception(log, msg, exception)
        raise exception

    log.info("picked a learner")
    return learner, fname