Example #1
def datatracker(portNb, portNotif):
    context = zmq.Context()
    socket = context.socket(zmq.PUB)
    socketFiles = context.socket(zmq.PAIR)
    socketNotification = context.socket(zmq.PAIR)
    socket.bind("tcp://127.0.0.1:%s" % portNb)
    socketNotification.bind("tcp://127.0.0.1:%s" % portNotif)
    # ----------------------------------------------------------------
    # ----------------------------------------------------------------
    # ----------------------------------------------------------------
    while True:
        print('sending i am alive')
        socket.send_pyobj({'ip': ip})  # 'ip' and 'mp4File' are assumed to be defined at module level
        try:
            msg = socketNotification.recv_pyobj(flags=zmq.NOBLOCK)  # non-blocking check for a notification
            if "ip_toCopyTo" in msg:
                ip_toCopyTo = msg["ip_toCopyTo"]
                port_toCopyTo = msg["port"]
                socketFiles.bind("tcp://127.0.0.1:%s" % port_toCopyTo)
                socketFiles.send_pyobj({'file': mp4File})

            if "ip_toReceiveFrom" in msg:
                ip_toRecFrom = msg["ip_toReceiveFrom"]
                port_toRecFRom = msg["port"]
                socketFiles.connect("tcp://" + str(ip_toRecFrom) + ":" +
                                    str(port_toRecFRom))
                received = socketFiles.recv_pyobj()
                mp4file = received['file']  # note: probably meant to update mp4File, the name used in the copy branch above
        except zmq.Again as e:
            pass
        time.sleep(1)
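
The peer that issues the copy instructions to this tracker is not part of the snippet. Purely as an illustration of the message shape used above (the keys 'ip_toCopyTo' and 'port' come from Example #1; the function name and addresses are placeholders), that side might look like:

import zmq

def tell_tracker_to_copy(notif_port, target_ip, target_port):
    """Hypothetical peer for Example #1: asks a data tracker to copy its file to another node."""
    context = zmq.Context()
    socket = context.socket(zmq.PAIR)
    socket.connect("tcp://127.0.0.1:%s" % notif_port)  # datatracker() binds this PAIR socket
    socket.send_pyobj({'ip_toCopyTo': target_ip, 'port': target_port})
    socket.close()
    context.term()
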
Example #2
def download(DKip, DKport):
    try:
        context = zmq.Context()
        socket = context.socket(zmq.REP)
        socket.bind("tcp://%s:%s" % (DKip, DKport))
        print("after binding...")

        message = socket.recv_string()
        print("Received request: ", message)
        video = message
        with open(video, 'rb') as vid:
            vi = vid.read()
        dic = {'video': vi}
        socket.send_pyobj(dic)
        mess = {'Type': 'Downloaded', 'ip': DKip, 'port': DKport}

        # --------------JUST FOR TEST --------------#
        #----------------REMOVE THE COMMENT LATER----#

        context2 = zmq.Context()
        socket2 = context2.socket(zmq.REQ)
        socket2.connect("tcp://%s:%s" % (MasterIP, MasterPort))  # MasterIP and MasterPort are assumed to be module-level globals
        socket2.send_pyobj(mess)
    except FileNotFoundError as e:
        print('file not found')
    except zmq.Again as e:
        pass
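
Example #2 only shows the data-keeper side of the exchange; the requesting peer is not included. Purely as an illustration, a minimal client for this REP socket could look like the sketch below (the function name, address, and output path are placeholders, not part of the original code):

import zmq

def request_video(dk_ip, dk_port, video_name, out_path):
    """Minimal client sketch for the REP server in Example #2; all names here are illustrative."""
    context = zmq.Context()
    socket = context.socket(zmq.REQ)
    socket.connect("tcp://%s:%s" % (dk_ip, dk_port))
    socket.send_string(video_name)   # the server reads this with recv_string()
    reply = socket.recv_pyobj()      # the server answers with {'video': <file bytes>}
    with open(out_path, 'wb') as f:
        f.write(reply['video'])
    socket.close()
    context.term()
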
Example #3
def SendCommand(command_url, command, payload=None):
  """Send an arbitrary command to all workers running on the cluster.

  :param str command_url: URL of command channel.
  :param command: Command to send to workers.

  """
  context = zmq.Context()
  socket = context.socket(zmq.PUB)
  socket.connect(command_url)
  msg = (command, payload)
  socket.send_pyobj(msg)
Example #4
def SendCommand(command_url, command, payload=None):
    """Send an arbitrary command to all workers running on the cluster.

  :param str command_url: URL of command channel.
  :param command: Command to send to workers.

  """
    context = zmq.Context()
    socket = context.socket(zmq.PUB)
    socket.connect(command_url)
    msg = (command, payload)
    socket.send_pyobj(msg)
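
Examples #3 and #4 are the publishing half of a broadcast: a (command, payload) tuple is pickled onto a PUB socket. As a hedged, single-worker sketch of what the receiving side might look like (with several workers a central forwarder would normally bind the channel instead; the 'quit' command name and the bind choice are assumptions, not taken from the original project):

import zmq

def command_listener(command_url):
    """Hypothetical worker loop matching SendCommand() above."""
    context = zmq.Context()
    socket = context.socket(zmq.SUB)
    socket.setsockopt_string(zmq.SUBSCRIBE, "")  # no topic filter: accept every message
    socket.bind(command_url)                     # SendCommand() connect()s, so this side binds
    while True:
        command, payload = socket.recv_pyobj()   # unpickle the (command, payload) tuple
        if command == "quit":                    # placeholder shutdown command
            break
        print("received command:", command, payload)

Note that PUB/SUB gives no delivery guarantee: a command published before a subscriber has connected is silently dropped.
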
Example #5
def get_learner(url, learners, fnames):
    """Get a learner from the database running at `url`.

    Parameters
    ----------
    url : str
        The url of the database manager running via
        (`adaptive_scheduler.server_support.manage_database`).
    learners : list of `adaptive.BaseLearner` instances
        List of `learners` corresponding to `fnames`.
    fnames : list
        List of `fnames` corresponding to `learners`.

    Returns
    -------
    fname : str
        The filename of the learner that was chosen.
    """
    job_id = get_job_id()
    log.info(f"trying to get learner", job_id=job_id)
    with ctx.socket(zmq.REQ) as socket:
        socket.connect(url)
        socket.send_pyobj(("start", job_id))
        log.info(f"sent start signal")
        reply = socket.recv_pyobj()
        log.info("got reply", reply=str(reply))
        if reply is None:
            msg = f"No learners to be run for {job_id}."
            log.exception(msg)
            raise RuntimeError(msg)
        elif isinstance(reply, Exception):
            log.exception("got an exception")
            raise reply
        else:
            fname = reply
            log.info(f"got fname")

    def maybe_lst(fname):
        if isinstance(fname, tuple):
            # TinyDB converts tuples to lists
            fname = list(fname)
        return fname

    try:
        learner = next(l for l, f in zip(learners, fnames) if maybe_lst(f) == fname)
    except StopIteration:
        msg = "Learner with this fname doesn't exist in the database."
        log.exception(msg)
        raise UserWarning(msg)

    log.info("picked a learner")
    return learner, fname
Example #6
    def send_message(self, socket, message, payload=None, flags=0):
        '''Send a message object. Subclasses may override this to
        decorate the message with appropriate IDs, then delegate upward to actually send
        the message. ``message`` may either be a pre-constructed ``Message`` object or
        a message identifier, in which (latter) case ``payload`` will become the message payload.
        ``payload`` is ignored if ``message`` is a ``Message`` object.'''

        message = Message(message, payload)
        if message.master_id is None:
            message.master_id = self.master_id
        message.src_id = self.node_id

        if self._super_debug:
            self.log.debug('sending {!r}'.format(message))
        socket.send_pyobj(message, flags)
Example #7
    def send_message(self, socket, message, payload=None, flags=0):
        '''Send a message object. Subclasses may override this to
        decorate the message with appropriate IDs, then delegate upward to actually send
        the message. ``message`` may either be a pre-constructed ``Message`` object or
        a message identifier, in which (latter) case ``payload`` will become the message payload.
        ``payload`` is ignored if ``message`` is a ``Message`` object.'''

        message = Message(message, payload)
        if message.master_id is None:
            message.master_id = self.master_id
        message.src_id = self.node_id

        if self._super_debug:
            self.log.debug('sending {!r}'.format(message))
        socket.send_pyobj(message, flags)
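
Examples #6 and #7 are two near-identical copies of the sending half of a small message protocol. Purely for illustration, and assuming the same Message class with master_id and src_id attributes, the receiving half could be as simple as the following hypothetical method:

    def recv_message(self, socket, flags=0, validate=True):
        '''Hypothetical counterpart to send_message() above: unpickle a Message
        and optionally check that it is addressed to this master.'''
        message = socket.recv_pyobj(flags)
        if self._super_debug:
            self.log.debug('received {!r}'.format(message))
        if validate and message.master_id not in (None, self.master_id):
            raise ValueError('message {!r} is not for this master'.format(message))
        return message
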
Example #8
def q_worker():
    context = zmq.Context()
    socket = context.socket(zmq.REP)  # Server
    ip = get_ip()
    print(ip)
    # Only works with the LAN IP or a wildcard (*):
    socket.bind('tcp://{}:26231'.format(ip))
    while tqw_alive:
        while q.qsize() < q_target:
            via_rpi3 = socket.recv_pyobj()
            socket.send_pyobj(int(q.qsize()))
            if via_rpi3 == 'Weiter!':
                break
            ts, bmsg = via_rpi3
            q.put((ts, bmsg))
        time.sleep(fpsk_min * 3.0)
Example #9
def tell_done(url, fname):
    """Tell the database that the learner has reached it's goal.

    Parameters
    ----------
    url : str
        The url of the database manager running via
        (`adaptive_scheduler.server_support.manage_database`).
    fname : str
        The filename of the learner that is done.
    """
    log.info("goal reached! 🎉🎊🥳")
    with ctx.socket(zmq.REQ) as socket:
        socket.connect(url)
        socket.send_pyobj(("stop", fname))
        log.info("sent stop signal", fname=fname)
        socket.recv_pyobj()  # Needed because of socket type
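
Examples #5 and #9 (and the extended #21 further down) all talk to a database manager over a REQ socket, but the manager itself is not shown. The toy REP loop below only mirrors the message shapes visible in these examples: ("start", job_id, ...) is answered with an fname or None, ("stop", fname) with a throwaway reply. The real adaptive_scheduler.server_support.manage_database is considerably more involved; this is only a sketch.

import zmq

def toy_database_manager(url, fnames):
    """Toy stand-in for the database manager used in Examples #5, #9 and #21."""
    ctx = zmq.Context()
    with ctx.socket(zmq.REP) as socket:
        socket.bind(url)
        todo = list(fnames)
        running = {}  # job_id -> fname
        while True:
            request, *args = socket.recv_pyobj()
            if request == "start":
                job_id = args[0]
                fname = todo.pop() if todo else None  # None means "no learners to be run"
                if fname is not None:
                    running[job_id] = fname
                socket.send_pyobj(fname)
            elif request == "stop":
                fname = args[0]
                running = {k: v for k, v in running.items() if v != fname}
                socket.send_pyobj(None)  # REP must reply; tell_done() ignores the content
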
Example #10
def sendDict(dict_to_send):
#    global socket
    with open('smalldata_plot.yml', 'r') as f:
        setupDict = yaml.safe_load(f)
    master_port=setupDict['master']['port']

    context = zmq.Context()
    socket = context.socket(zmq.REP)
    socket.bind("tcp://*:%s" % master_port)

    while True:
        message = socket.recv()
        print("smallData master received request: ", message)
        if len(dict_to_send.keys())>1:
            print("smallData master will send : ", dict_to_send['lightStatus__laser'].shape)
        else:
            print("we have an empty dictionary right now....")
        socket.send_pyobj(dict_to_send)
Example #11
def conn_server(server, info=0): 
    """
        return the result of running the task *runnable* with the given 
        arguments.
        
        params: 
            host: e.g. '210.45.117.30' or 'qtg7501' if use the later should add ip hostname pair in /etc/hosts
            querry:  querry whether server available 
            querry_timeout: 
                我曾经试过用 stopit module 来给recv设置timeout, 但是没有成功,应该是涉及到背后线程没有关闭
                refer to https://github.com/zeromq/pyzmq/issues/132
    """
    server_info = resolve_server(server)
    host = server_info['host']
    port = server_info['port']
    tunnel = server_info['tunnel']
    tunnel_server = server_info['tunnel_server']
    
    if info>0: 
        print_vars(vars(),  ['server_info'])
    context = zmq.Context()
    socket = context.socket(zmq.REQ)
    url = 'tcp://{}:{}'.format(host, port)
    if not tunnel:  #one should either use connect or tunnel_connection, not both.
        socket.connect(url)
    else:
        #zmq.ssh.tunnel_connection(socket, url, "myuser@remote-server-ip")
        #issue: the tunnel seems to restrict the port number; a 5-digit port like 90900 cannot be used
        zmq.ssh.tunnel_connection(socket, url, tunnel_server)
        #print 'tunnel succeed: {}'.format(url)
    
    if 0: 
        socket.setsockopt(zmq.LINGER, 0)   #this is needed or else timeout wont work 
        #socket.send_pyobj('querry')    
        socket.send_pyobj({'header': 'querry'})    
        # use poll for timeouts:
        poller = zmq.Poller()
        poller.register(socket, zmq.POLLIN)
        if poller.poll(querry_timeout*1000): # querry_timeout seconds, converted to milliseconds
            server_status = socket.recv_pyobj()
        else:
            #raise IOError("Timeout processing auth request")        
            status = server_status = 'not_reachable'

        if 0:  # below not working  
            try: 
                with stopit.SignalTimeout(querry_timeout, False) as ctx:
                #with stopit.ThreadingTimeout(querry_timeout, False) as ctx:
                    print('tttttry', port, host)
                    server_status = socket.recv_pyobj()
            except Exception as err: 
                print('rrrraise', err)
                #socket.close()
                #context.term()
                #raise 
                #server_status = 'not_reachable'
                raise 
            print('sssssss', ctx.state)
            if ctx.state == ctx.EXECUTED:
                pass # All's fine, everything was executed within 10 seconds
            elif ctx.state == ctx.EXECUTING:
                pass # Hmm, that's not possible outside the block
            elif ctx.state == ctx.TIMED_OUT:
                server_status = 'not_reachable'  # Eeek, the timeout occurred while executing the block
            elif ctx.state == ctx.INTERRUPTED:
                pass 
                # Oh you raised specifically the TimeoutException in the block
            elif ctx.state == ctx.CANCELED:
                pass # Oh you called to_ctx_mgr.cancel() method within the block but it # executed till the end
            else:
                pass 
                # That's not possible            
            #print 'aaaaaafter ', ctx.state == ctx.TIMED_OUT , ctx.state == ctx.EXCUTING 
            print('aaaaaafter ', ctx.state == ctx.TIMED_OUT, ctx.TIMED_OUT)
    
    # these are not necessary, but still good practice:
    #socket.close()
    #context.term()    
    return context, socket 
Example #12
def queue(runnable, args=None, kwargs=None, querry=False, 
        host=None, port=None, tunnel=False, querry_timeout=5, 
        tunnel_server=None, 
        kill_server=False, ):
    """
        return the result of running the task *runnable* with the given 
        arguments.
        
        params: 
            host: e.g. '210.45.117.30' or 'qtg7501' if use the later should add ip hostname pair in /etc/hosts
            querry:  querry whether server available 
            querry_timeout: 
                我曾经试过用 stopit module 来给recv设置timeout, 但是没有成功,应该是涉及到背后线程没有关闭
                refer to https://github.com/zeromq/pyzmq/issues/132
    """
    #host =  '222.195.73.70'
    #port = 90900
    host = host if host is not None else '127.0.0.1'
    port = port if port is not None else 90900  # note: 90900 is above the valid TCP port range (max 65535)
    args = args if args is not None else ()
    kwargs = kwargs if kwargs is not None else {}
    
    context = zmq.Context()
    socket = context.socket(zmq.REQ)
    url = 'tcp://{}:{}'.format(host, port)
    if not tunnel:  #one should either use connect or tunnel_connection, not both.
        socket.connect(url)
    else:
        #zmq.ssh.tunnel_connection(socket, url, "myuser@remote-server-ip")
        #issue: the tunnel seems to restrict the port number; a 5-digit port like 90900 cannot be used
        zmq.ssh.tunnel_connection(socket, url, tunnel_server)
        #print 'tunnel succeed: {}'.format(url)
       
    
    if kill_server: 
        socket.send_pyobj({'header': 'stop'}) 
        rep = server_status = socket.recv_pyobj()
        print('REP: %s' % (rep,))
        return 
        
    results = None 
    status = 'refuse'
    
    if querry:
        socket.setsockopt(zmq.LINGER, 0)   #this is needed or else timeout wont work 
        #socket.send_pyobj('querry')    
        num_of_threads = None
        num_of_memory = None 
        if len(args)>0:  #in main.Main.run_many_dist, NUM_OF_THREADS is passed in args[0]
            if isinstance(args[0], dict): 
                num_of_threads = args[0].get('NUM_OF_THREADS')
                num_of_memory = args[0].get('num_of_memory', None) 
        socket.send_pyobj({
            'header': 'querry', 
            'what': 'is_available', 
            #'num_of_threads': kwargs.get('NUM_OF_THREADS', None),   # requested resources 
            #'num_of_memory': kwargs.get('num_of_memory', None), 
            'num_of_threads': num_of_threads, 
            'num_of_memory': num_of_memory, 
            })    
        # use poll for timeouts:
        poller = zmq.Poller()
        poller.register(socket, zmq.POLLIN)
        if poller.poll(querry_timeout*1000): # querry_timeout seconds, converted to milliseconds
            server_status = socket.recv_pyobj()
            #print_vars(vars(),  ['server_status'])
        else: 
            
            #raise IOError("Timeout processing auth request")        
            #not able to reach server within querry_timeout 
            #some times, need to enlarge querry_timeout to ensure connection success 
            server_status = 'not_reachable'
            status = 'conn timeout'

        if 0:  # below not working  
            try: 
                with stopit.SignalTimeout(querry_timeout, False) as ctx:
                #with stopit.ThreadingTimeout(querry_timeout, False) as ctx:
                    print('tttttry', port, host)
                    server_status = socket.recv_pyobj()
            except Exception as err: 
                print('rrrraise', err)
                #socket.close()
                #context.term()
                #raise 
                #server_status = 'not_reachable'
                raise 
            print('sssssss', ctx.state)
            if ctx.state == ctx.EXECUTED:
                pass # All's fine, everything was executed within 10 seconds
            elif ctx.state == ctx.EXECUTING:
                pass # Hmm, that's not possible outside the block
            elif ctx.state == ctx.TIMED_OUT:
                server_status = 'not_reachable'  # Eeek, the timeout occurred while executing the block
            elif ctx.state == ctx.INTERRUPTED:
                pass 
                # Oh you raised specifically the TimeoutException in the block
            elif ctx.state == ctx.CANCELED:
                pass # Oh you called to_ctx_mgr.cancel() method within the block but it # executed till the end
            else:
                pass 
                # That's not possible            
            #print 'aaaaaafter ', ctx.state == ctx.TIMED_OUT , ctx.state == ctx.EXCUTING 
            print('aaaaaafter ', ctx.state == ctx.TIMED_OUT, ctx.TIMED_OUT)
    
    else:   
        server_status = 'available'
        
    if server_status == 'available': 
        #runnable_string = cloud.serialization.cloudpickle.dumps(runnable)
        #socket.send_pyobj({'header': 'run', 
        #    'runnable_string': runnable_string, 
        #    'args': args, 
        #    'kwargs': kwargs
        #    })    
        msg = pack_runnable_msg(runnable, args, kwargs)
        socket.send_pyobj(msg)
       
        results = socket.recv_pyobj()
        status = 'done'
    else: 
        if server_status != 'not_reachable':
            status += '  %s'%(server_status, ) 


    
    # these are not necessary, but still good practice:
    socket.close()
    context.term()    
    return status, results
Example #13
 def send_shut_down(self): 
     context = zmq.Context()
     socket = context.socket(zmq.REQ)
     url = 'tcp://{}:{}'.format(self.host, self.port)
     socket.connect(url)
     socket.send_pyobj({'header': 'stop'})    
Example #14
             FLAGS.v_fov, FLAGS.gpu, FLAGS.filler_server_addr)


def exec_cmd(msg):
    func_name = msg[0]
    args = msg[1:]
    ret = getattr(server, func_name)(*args)
    return ret


while True:
    msg = msgpack.unpackb(socket.recv(), raw=False)
    ret = None

    if msg[0] == 'exit':
        socket.send_pyobj('ok')  # note: this reply is pickled, while normal replies below are msgpack-encoded
        break

    elif msg[0] == 'cmd_list':
        for m in msg[1:]:
            ret = exec_cmd(m)
    else:
        ret = exec_cmd(msg)

    socket.send(msgpack.packb(ret, use_bin_type=True))

if FLAGS.bind:
    socket.unbind(FLAGS.addr)
else:
    socket.disconnect(FLAGS.addr)
Example #15
    context = zmq.Context()
    socket = context.socket(zmq.REP)
    socket.bind(FLAGS.addr)

    res = FLAGS.resolution

    filler = Filler(res, gpu=FLAGS.gpu)

    prefilled_img = np.zeros((res, res, 3), dtype=np.uint8)

    while True:
        msg = socket.recv_pyobj()
        ret = None

        if msg[0] == 'exit':
            socket.send_pyobj('ok')
            break

        elif msg[0] == 'args':
            opengl_arr, imgs, depths, pose, poses, fov = msg[1:]
            render_pc(cuda_pc, FLAGS.gpu, FLAGS.resolution, opengl_arr, imgs,
                      depths, pose, poses, prefilled_img, fov)
            ret = filler.fill(prefilled_img, opengl_arr)

        success = False
        while not success:
            try:
                socket.send_pyobj(ret, zmq.NOBLOCK)  # non-blocking send; retried until zmq can queue the reply
                success = True
            except zmq.error.Again as e:
                time.sleep(0.01)
Example #16
def add_face():

    # json_path = '/mnt/code/face/face_tool/face_666.json'
    json_path = Code.face_path

    if not request.json or 'personInfo' not in request.json:
        return jsonify({'code':205, 'msg':'http请求参数错误'})

    add_list = request.json.get('personInfo')

    zmq_ip = Code.localIp
    zmq_port = Code.zmq_port

    try:
        context = zmq.Context()
        socket = context.socket(zmq.REQ)
        socket.connect("tcp://%s:%s" % (zmq_ip, zmq_port))  
        print('success to connnect the server ip:%s , port:%s' % (zmq_ip, zmq_port))
    except:
        print('fail to connnect the server')
         
        return jsonify({'code':204, 'msg':'程序内部错误'})

    person_list = []

    # counters for successful and failed pictures
    total_pic = 0
    success_pic = 0
    fail_pic = 0

    fail_list = []

    for person in add_list:
        try:
            ID = person.get('ID')
            name = person.get('name')
            tag = person.get('tag')
            path_list = person.get('filePath')

            print('Adding %s...' % name)

            if not ID or not name or not path_list:
                # print('incomplete information for this person')
                continue

            for path in path_list:

                total_pic += 1

                start = time.time()
                # path conversion
                img_path = '/extstore/mah' + path
                # img_path = path

                img = cv2.imread(img_path)

                # send a zmq object to run face detection
                pyobj = Munch()
                pyobj.img = img
                pyobj.type = 'feature_detection'
                socket.send_pyobj(pyobj)
                zmq_result = socket.recv_pyobj().result

                # only add to the library when exactly one face is detected
                if len(zmq_result.get('results')) == 1:

                    front_face = zmq_result.get('results')[0].get('front_face')

                    if front_face:
                        freatrue = zmq_result.get('results')[0].get('feature')
                        # assemble the info into a dict
                        person_info = {}
                        person_info["ID"] = ID
                        person_info["name"] = name
                        person_info["tag"] = tag
                        person_info["path"] = path
                        person_info["freatrue"] = freatrue
                    
                        # append to person_list
                        person_list.append(person_info)

                        success_pic += 1

                        end = time.time()
                        print('Time to add one face:', end - start)

                    else:
                        print('This is a profile face; it does not meet the requirements for the library')
                        fail_pic += 1
                        fail_list.append(path)

                        

                elif len(zmq_result.get('results')) == 0:
                    fail_pic += 1
                    fail_list.append(path)
                    continue
                else:
                    fail_pic += 1
                    fail_list.append(path)
                    continue
        except:
            continue

    # write person_list into the face library
    if os.path.isfile(json_path):
        pass
    else:
        with open(json_path, 'w') as f:
            f.write("[]")

    with open(json_path, 'r', encoding='utf-8-sig') as f:
        info_list = json.load(f)
        for i in person_list:
            info_list.append(i)
    # write the data
    with open(json_path, 'w') as f:
        json.dump(info_list, f, ensure_ascii=False)


    print('total_pic:', total_pic)
    print('success_pic', success_pic)
    print('person_list:', len(person_list))
    print('fail_pic:', fail_pic)

    print('fail_list', fail_list)

    return jsonify(
        {
            "code":200, 
            "msg":"添加人脸成功",
            "detail":{
                        "total_add_number":total_pic,
                        "success_add_number":success_pic,
                        "fail_add_number":fail_pic,
                        "fail_add_picture":fail_list
                    }       
        }
    )
Example #17
def face_recognition():

    if not request.json or 'taskGUID' not in request.json:
        return jsonify({'code':205, 'msg':'http请求参数错误'})

    task_guid = request.json.get('taskGUID')
    file_path_list = request.json.get('filePath')
    url_list = request.json.get('url')
    imgbase64 = request.json.get('imgBase64')

    if not file_path_list and not url_list and not imgbase64:
        return jsonify({'code':205, 'msg':'http请求参数错误'})

    zmq_ip = Code.localIp
    zmq_port = Code.zmq_port
    
    try:
        context = zmq.Context()
        socket = context.socket(zmq.REQ)
        socket.connect("tcp://%s:%s" % (zmq_ip, zmq_port))  
        print('successfully connected to the server ip:%s , port:%s' % (zmq_ip, zmq_port))
    except:
        print('failed to connect to the server')
        return jsonify({'code':204, 'msg':'程序内部错误'})

    
    face_list = []

    if file_path_list:
        for img_path in file_path_list:
            try:
                img = cv2.imread(img_path)
            except:
                continue
            pyobj = Munch()
            pyobj.img = img
            pyobj.type = 'face_recognition'
            socket.send_pyobj(pyobj)
            result_dict = socket.recv_pyobj().result.get('reslut')
            try:
                del result_dict['queryurl']
                del result_dict['msg']
                del result_dict['code']
            except:
                pass

            face_list.append(result_dict)

    elif url_list:
        for url in url_list:
            try:
                capture = cv2.VideoCapture(url)
                if capture.isOpened():
                    ret, img = capture.read()
            except:
                continue
            pyobj = Munch()
            pyobj.img = img
            pyobj.type = 'face_recognition'
            socket.send_pyobj(pyobj)
            result_dict = socket.recv_pyobj().result.get('reslut')
            try:
                del result_dict['queryurl']
                del result_dict['msg']
                del result_dict['code']
            except:
                pass

            face_list.append(result_dict)

    elif imgbase64:
        img_data = base64.b64decode(imgbase64)
        random_str = ''.join(random.sample('abcdefghijklmnopqrstuvwxyz0123456789',8))
        picture_dir = os.getcwd()
        if os.path.isdir(os.getcwd() + '/picture_dir'):
            pass
        else:
            os.makedirs(os.getcwd() + '/picture_dir')
            
        img_path = os.getcwd() + '/picture_dir/' + random_str + '.jpg'
        with open(img_path, "wb") as f2:
            f2.write(img_data)

        img = cv2.imread(img_path)

        pyobj = Munch()
        pyobj.img = img
        pyobj.type = 'face_recognition'
        socket.send_pyobj(pyobj)
        result_dict = socket.recv_pyobj().result.get('reslut')
        
        try:
            del result_dict['queryurl']
            del result_dict['msg']
            del result_dict['code']
        except:
            pass

        face_list.append(result_dict)
        os.remove(img_path)


    return jsonify({'taskGUID':task_guid, 'code': 200, 'result_list':face_list})
Example #18
def MasterProcess_func(process_ID, undertaker_table, file_names_tables, availability_table):
    print("master process no. "+str(process_ID)+" started")
    # process_ID = int(sys.argv[1])
    # IP_table = shared_memory.SharedMemory(name="IP_table") #connect to shared memory
    port = str(4000+process_ID) #port for receiving requests
    context = zmq.Context()
    socket = context.socket(zmq.REP)
    socket.bind("tcp://%s:%s"  %( get_ip_address(), port)) #bind server
    # socket_datakeeper = context.socket(zmq.PULL)
    # socket.bind("tcp://%s:%s"  %( get_ip_address(), port))
    print(len(file_names_tables),file_names_tables)
    print(len(availability_table),availability_table)
    while True:
        #wait for new request
        print("#####################################################################################")
        print("waiting for new request")
        request_type,file_name = socket.recv_pyobj()
        print(str(process_ID)+" process received a request of type :"+request_type)
        print(file_name)

        print(file_names_tables,file_name)
        IP_return_list = find_file(file_names_tables,file_name) #search file name in all data keepers
        if request_type == "upload":
            print("IP_return_list : ",IP_return_list)
            if(len(IP_return_list) != 0):
                socket.send_pyobj("error : file already uploaded before")
                print("master response : error // file already uploaded before")
            else:
                '''
                upload sequence
                '''
                message = upload_handler(availability_table)
                print("master response :"+message +" is free to upload to")
                socket.send_pyobj(message)
                # wait for receive from data keeper
                # send success to client
                # free data keeper -> set IPport in availability_table to True

        elif (request_type == "download"):
            print("IP_return_list : ",IP_return_list)
            if(len(IP_return_list) != 0):
                '''
                download sequence
                '''
                message = download_handler(availability_table,IP_return_list)
                print("master response :"+str(len(message)) +" are free to download from")
                socket.send_pyobj(message) #send array of free IP:Port to client

            else:
                socket.send_pyobj("error : file not found in any data keeper")
                print("master response : error // file not found in any data keeper")
        elif request_type == "replyDownload":
            availability_table[file_name] = False
            print(file_name + "is taken now..")
            socket.send_pyobj("Fol 3alik ya client")
        elif request_type == "dataKeeperSuccess":
            IPport, oldrequest, filedownloaded = file_name
            print(file_name)
            availability_table[IPport] = True
            if (oldrequest == "upload"):
                IP,port=IPport.split(":")
                templist = file_names_tables[IP]
                templist.append(filedownloaded)
                file_names_tables[IP] = templist
                #file_names_tables[IP].append(filedownloaded)
                print("file_names_tables[IP] = ",file_names_tables[IP])
            #TODO : send success sig to client
            socket.send_pyobj("Fol 3alik ya data keeper")

        else:
            socket.send_pyobj("error // request type not known")
            print("master response : error // request type not known")
Example #19
def launch_server():

    beam = FakeBeam()
    port = "5000"
    context = zmq.Context()
    socket = context.socket(zmq.REP)
    socket.bind("tcp://*:%s" % port)

    full_maxlen = 2000000

    # Get data

    peak_8_median = deque(maxlen=full_maxlen)
    peak_9_median = deque(maxlen=full_maxlen)
    peak_10_median = deque(maxlen=full_maxlen)

    peak_8_std = deque(maxlen=full_maxlen)
    peak_9_std = deque(maxlen=full_maxlen)
    peak_10_std = deque(maxlen=full_maxlen)

    peak_8_median_TS = deque(
        maxlen=full_maxlen)  # Maybe use this for stdev too?
    peak_9_median_TS = deque(maxlen=full_maxlen)
    peak_10_median_TS = deque(maxlen=full_maxlen)

    # Subscribe to devices
    beam.peak_8.subscribe(
        partial(new_data,
                median=peak_8_median,
                stdev=peak_8_std,
                medianTS=peak_8_median_TS))

    beam.peak_9.subscribe(
        partial(new_data,
                median=peak_9_median,
                stdev=peak_9_std,
                medianTS=peak_9_median_TS))

    beam.peak_10.subscribe(
        partial(new_data,
                median=peak_10_median,
                stdev=peak_10_std,
                medianTS=peak_10_median_TS))

    medianDict = {
        'peak_8_median': peak_8_median,
        'peak_9_median': peak_9_median,
        'peak_10_median': peak_10_median
    }
    stdevDict = {
        'peak_8_std': peak_8_std,
        'peak_9_std': peak_9_std,
        'peak_10_std': peak_10_std
    }
    median_stdev_TS_Dict = {
        'peak_8_std_median_TS': peak_8_median_TS,
        'peak_9_std_median_TS': peak_9_median_TS,
        'peak_10_std_median_TS': peak_10_median_TS
    }

    data = {
        'medianDict': medianDict,
        'stdevDict': stdevDict,
        'median_stdev_TS_Dict': median_stdev_TS_Dict
    }

    # Keep sending data
    while True:

        message = socket.recv()  # request content is ignored; REP just needs a recv before each send
        socket.send_pyobj(data)
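
Example #19 answers any request with the full data dictionary, but no client is shown. A minimal polling client, assuming the server runs on localhost port 5000 and ignores the request body, might be:

import time
import zmq

def poll_peak_data(host="localhost", port="5000", interval=2.0):
    """Polling-client sketch for the REP server in Example #19 (host, port and request body are assumptions)."""
    context = zmq.Context()
    socket = context.socket(zmq.REQ)
    socket.connect("tcp://%s:%s" % (host, port))
    while True:
        socket.send(b"data please")    # content is ignored by the server
        data = socket.recv_pyobj()     # the dict of deques built in launch_server()
        print(len(data['medianDict']['peak_8_median']), "median samples so far")
        time.sleep(interval)
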
Example #20
def send_msg(msg, socket=None, server=None,  querry_timeout=5, info=0):
    """
        return the result of running the task *runnable* with the given 
        arguments.
        
        params: 
            host: e.g. '210.45.117.30' or 'qtg7501' if use the later should add ip hostname pair in /etc/hosts
            querry:  querry whether server available 
            querry_timeout: 
                我曾经试过用 stopit module 来给recv设置timeout, 但是没有成功,应该是涉及到背后线程没有关闭
                refer to https://github.com/zeromq/pyzmq/issues/132
    """
    if socket is None: 
        assert server is not None 
        context, socket = conn_server(server, info=info)
    
    if 1: 
        socket.setsockopt(zmq.LINGER, 0)   #this is needed or else timeout wont work 
        #socket.send_pyobj('querry')    
        socket.send_pyobj(msg)    
        # use poll for timeouts:
        poller = zmq.Poller()
        poller.register(socket, zmq.POLLIN)
        if poller.poll(querry_timeout*1000): # querry_timeout seconds, converted to milliseconds
            reply = socket.recv_pyobj()
        else:
            #raise IOError("Timeout processing auth request")        
            #reply = None
            reply = 'not_reachable'

        if 0:  # below not working  
            try: 
                with stopit.SignalTimeout(querry_timeout, False) as ctx:
                #with stopit.ThreadingTimeout(querry_timeout, False) as ctx:
                    print('tttttry', port, host)
                    server_status = socket.recv_pyobj()
            except Exception as err: 
                print('rrrraise', err)
                #socket.close()
                #context.term()
                #raise 
                #server_status = 'not_reachable'
                raise 
            print('sssssss', ctx.state)
            if ctx.state == ctx.EXECUTED:
                pass # All's fine, everything was executed within 10 seconds
            elif ctx.state == ctx.EXECUTING:
                pass # Hmm, that's not possible outside the block
            elif ctx.state == ctx.TIMED_OUT:
                server_status = 'not_reachable'  # Eeek, the timeout occurred while executing the block
            elif ctx.state == ctx.INTERRUPTED:
                pass 
                # Oh you raised specifically the TimeoutException in the block
            elif ctx.state == ctx.CANCELED:
                pass # Oh you called to_ctx_mgr.cancel() method within the block but it # executed till the end
            else:
                pass 
                # That's not possible            
            #print 'aaaaaafter ', ctx.state == ctx.TIMED_OUT , ctx.state == ctx.EXCUTING 
            print('aaaaaafter ', ctx.state == ctx.TIMED_OUT, ctx.TIMED_OUT)
    
    
    return reply 
Example #21
def get_learner(
    learners: List[BaseLearner],
    fnames: List[str],
    url: str,
    log_fname: str,
    job_id: str,
    job_name: str,
) -> Tuple[str, str]:
    """Get a learner from the database running at `url` and this learner's
    process will be logged in `log_fname` and running under `job_id`.

    Parameters
    ----------
    learners : list of `adaptive.BaseLearner` instances
        List of `learners` corresponding to `fnames`.
    fnames : list
        List of `fnames` corresponding to `learners`.
    url : str
        The url of the database manager running via
        (`adaptive_scheduler.server_support.manage_database`).
    log_fname : str
        The filename of the log-file. Should be passed in the job-script.
    job_id : str
        The job_id of the process running the job. Should be passed in the job-script.
    job_name : str
        The name of the job. Should be passed in the job-script.

    Returns
    -------
    fname : str
        The filename of the learner that was chosen.
    """
    _add_log_file_handler(log_fname)
    log.info(
        "trying to get learner", job_id=job_id, log_fname=log_fname, job_name=job_name
    )
    with ctx.socket(zmq.REQ) as socket:
        socket.connect(url)
        socket.send_pyobj(("start", job_id, log_fname, job_name))
        log.info(f"sent start signal, timeout after 10s.")
        socket.setsockopt(zmq.RCVTIMEO, 10_000)  # timeout after 10s
        reply = socket.recv_pyobj()
        log.info("got reply", reply=str(reply))
        if reply is None:
            msg = f"No learners to be run."
            exception = RuntimeError(msg)
            log_exception(log, msg, exception)
            raise exception
        elif isinstance(reply, Exception):
            log_exception(log, "got an exception", exception=reply)
            raise reply
        else:
            fname = reply
            log.info(f"got fname")

    def maybe_lst(fname: Union[Tuple[str], str]):
        if isinstance(fname, tuple):
            # TinyDB converts tuples to lists
            fname = list(fname)
        return fname

    try:
        learner = next(l for l, f in zip(learners, fnames) if maybe_lst(f) == fname)
    except StopIteration:
        msg = "Learner with this fname doesn't exist in the database."
        exception = UserWarning(msg)
        log_exception(log, msg, exception)
        raise exception

    log.info("picked a learner")
    return learner, fname
Example #22
print(operando2)


print("operandos: ", operando1, " ", operando2)

# --------------------operation----------------------
try:
    resultado = str(int(operando1) * int(operando2))
except ZeroDivisionError:  # note: multiplication cannot raise this; presumably left over from a division example
    resultado = "no se puede hacer la operacion"


print("resultado: ", resultado)
result['respuesta'] = resultado

with open('result.json', 'w') as file:
    json.dump(result, file)

with open('result.json', 'r') as file:
    dat = json.load(file)

archivo = dat.get("respuesta")
print(archivo)

socket.send_pyobj(archivo)





Example #23
def launch_server():

    port = "5000"
    context = zmq.Context()
    socket = context.socket(zmq.PUB)
    socket.bind("tcp://*:%s" % port)

    maxlen = 1000000

    # Get data
    beam = FakeBeam()
    peak_8 = deque(maxlen=maxlen)
    peak_8_TS = deque(maxlen=maxlen)
    peak_9 = deque(maxlen=maxlen)
    peak_9_TS = deque(maxlen=maxlen)
    peak_10 = deque(maxlen=maxlen)
    peak_10_TS = deque(maxlen=maxlen)
    peak_11 = deque(maxlen=maxlen)
    peak_11_TS = deque(maxlen=maxlen)
    peak_12 = deque(maxlen=maxlen)
    peak_12_TS = deque(maxlen=maxlen)
    peak_13 = deque(maxlen=maxlen)
    peak_13_TS = deque(maxlen=maxlen)
    peak_14 = deque(maxlen=maxlen)
    peak_14_TS = deque(maxlen=maxlen)
    peak_15 = deque(maxlen=maxlen)
    peak_15_TS = deque(maxlen=maxlen)

    # Subscribe to devices
    beam.peak_8.subscribe(partial(new_data, in_value=peak_8,
                                  in_time=peak_8_TS))

    beam.peak_9.subscribe(partial(new_data, in_value=peak_9,
                                  in_time=peak_9_TS))

    beam.peak_10.subscribe(
        partial(new_data, in_value=peak_10, in_time=peak_10_TS))

    beam.peak_11.subscribe(
        partial(new_data, in_value=peak_11, in_time=peak_11_TS))

    beam.peak_12.subscribe(
        partial(new_data, in_value=peak_12, in_time=peak_12_TS))

    beam.peak_13.subscribe(
        partial(new_data, in_value=peak_13, in_time=peak_13_TS))

    beam.peak_14.subscribe(
        partial(new_data, in_value=peak_14, in_time=peak_14_TS))

    beam.peak_15.subscribe(
        partial(new_data, in_value=peak_15, in_time=peak_15_TS))

    peakDict = {
        'peak_8': peak_8,
        'peak_9': peak_9,
        'peak_10': peak_10,
        'peak_11': peak_11,
        'peak_12': peak_12,
        'peak_13': peak_13,
        'peak_14': peak_14,
        'peak_15': peak_15
    }
    peakTSDict = {
        'peak_8_TS': peak_8_TS,
        'peak_9_TS': peak_9_TS,
        'peak_10_TS': peak_10_TS,
        'peak_11_TS': peak_11_TS,
        'peak_12_TS': peak_12_TS,
        'peak_13_TS': peak_13_TS,
        'peak_14_TS': peak_14_TS,
        'peak_15_TS': peak_15_TS
    }

    data = {'peakDict': peakDict, 'peakTSDict': peakTSDict}

    # Send data at one-second intervals
    while True:
        socket.send_pyobj(data)
        print(len(data['peakDict']['peak_8']))
        time.sleep(1)
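
Example #23 publishes the same dictionary once per second; the subscriber is not part of the snippet. Assuming it runs on the same host as the server, a minimal SUB client could be:

import zmq

def subscribe_to_peaks(host="localhost", port="5000"):
    """Subscriber sketch for the PUB server in Example #23 (host and port are assumptions)."""
    context = zmq.Context()
    socket = context.socket(zmq.SUB)
    socket.connect("tcp://%s:%s" % (host, port))
    socket.setsockopt_string(zmq.SUBSCRIBE, "")  # subscribe to everything; send_pyobj() uses a single frame
    while True:
        data = socket.recv_pyobj()               # same dict of deques the server builds
        print(len(data['peakDict']['peak_8']), "samples of peak_8 so far")
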
Example #24
def runmaster(nClients):

    with open('smalldata_plot.yml', 'r') as f:
        setupDict = yaml.safe_load(f)
    master_port = setupDict['master']['port']

    context = zmq.Context()
    socket = context.socket(zmq.PUB)
    socket.bind("tcp://*:%s" % master_port)

    myDict = {'runNumber': -1}

    hutches = ['amo', 'sxr', 'xpp', 'xcs', 'mfx', 'cxi', 'mec']
    hutch = None
    print('master_PUB pre pre hostname')
    import socket as skt
    hostname = skt.gethostname()
    print('master_PUB post pre hostname')
    #hostname=socket.gethostname() #this is a problem now - not interactively...WHY?
    #print('master_PUB post hostname')
    for thisHutch in hutches:
        if hostname.find(thisHutch) >= 0:
            hutch = thisHutch.upper()
    if hutch is None:
        #then check current path
        path = os.getcwd()
        for thisHutch in hutches:
            if path.find(thisHutch) >= 0:
                hutch = thisHutch.upper()
    if hutch is None:
        print(
            'cannot figure out which hutch we are in to use. resetting at end of run will not work'
        )

    #main "thread" to get the MPI data and append it to the dict.
    nDataReceived = 0
    print(
        'About to start the while loop for the master process w/ %d clients' %
        nClients)
    while nClients > 0:
        print('MASTER got data: ', nDataReceived)
        nDataReceived += 1
        ##get current run number & reset if new. Put into thread?
        ##here, I'm resetting the master dict on a new run. For now, this is not exactly how this should run.
        ##need to maybe use a deque for the jet tracking? Figure out much later how to combine...
        #if hutch is not None and nDataReceived%(size-1)==(size-2):
        #    lastRun = RegDB.experiment_info.experiment_runs(hutch)[-1]['num']
        #    #if the run number has changed, reset the master dictionary & set the new run number.
        #    if lastRun != myDict['runNumber']:
        #        print('Reset master dict, new run number: %d'%lastRun)
        #        myDict.clear()
        #        myDict['runNumber']=lastRun

        # Remove client if the run ended
        md = mpidata()
        md.recv()
        ##ideally, there is a reset option from the bokeh server, but we can make this
        ##optional & reset on run boundaries instead/in addition.
        if md.small.endrun:  #what if going from just running to recording?
            print('ENDRUN!')
            #nClients -= 1 #No...
            myDict.clear()
            myDict['runNumber'] = lastRun  # note: lastRun is only defined in the commented-out run-number check above
        else:
            print('DEBUG: master: ', md.nEvts)
            #append the lists in the dictionary we got from the clients to a big master dict.
            for mds in md.small.arrayinfolist:
                #print 'mds name: ',mds.name
                if mds.name not in myDict.keys():
                    myDict[mds.name] = getattr(md, mds.name)
                else:
                    myDict[mds.name] = np.append(myDict[mds.name],
                                                 getattr(md, mds.name),
                                                 axis=0)
            #check if dict is aligned
            for mds in md.small.arrayinfolist:
                if mds.name == 'nEvts': continue
                if mds.name == 'send_timeStamp': continue
                if myDict[mds.name].shape[0] != myDict['event_time'].shape[0]:
                    print('We are out of alignment for %s ' % mds.name,
                          myDict[mds.name].shape[0],
                          myDict['event_time'].shape[0])

            #md.addarray('evt_ts',np.array(evt_ts))
            evt_ts_str = '%.4f' % (md.send_timeStamp[0] +
                                   md.send_timeStamp[1] / 1e9)
            #here we will send the dict (or whatever we make this here) to the plots.
            print('master data: ', myDict.keys())
            print('master has events: ', myDict['lightStatus__xray'].shape)

            #
            # this is if we send data off via ZMQ.
            #
            #print("smallData master received request: ", message)
            #if len(dict_to_send.keys())>0:
            #    print("smallData master will send : ", dict_to_send['lightStatus__xray'].shape)
            #else:
            #    print("we have an empty dictionary right now....")
            socket.send_pyobj(myDict)


def launch_server():

    global oldTime
    oldTime = time.time()
    port = "5000"
    context = zmq.Context()
    socket = context.socket(zmq.REP)
    socket.bind("tcp://*:%s" % port)
    
    maxlen = 500000
    full_maxlen = 2000000
    
    # Get data
    beam = FakeBeam()
    peak_8 = deque(maxlen=maxlen)
    peak_8_TS = deque(maxlen=maxlen)
    peak_9 = deque(maxlen=maxlen)
    peak_9_TS = deque(maxlen=maxlen)
    peak_10 = deque(maxlen=maxlen)
    peak_10_TS = deque(maxlen=maxlen)
    
    peak_8_median = deque(maxlen=full_maxlen)
    peak_9_median = deque(maxlen=full_maxlen)
    peak_10_median = deque(maxlen=full_maxlen)
    
    peak_8_std = deque(maxlen=full_maxlen)
    
    peak_8_median_TS = deque(maxlen=full_maxlen) # Maybe use this for stdev too?
    
    peak_8_temp = []
    peak_9_temp = []
    peak_10_temp = []

    
    # Subscribe to devices
    beam.peak_8.subscribe(
        partial(new_data, 
                in_value=peak_8, 
                in_time=peak_8_TS, 
                temp=peak_8_temp, 
                median=peak_8_median,
                stdev=peak_8_std,
                medianTS=peak_8_median_TS)
    )
    
#     beam.peak_9.subscribe(
#         partial(new_data, in_value=peak_9, in_time=peak_9_TS, temp=peak_9_temp)
#     )
    
#     beam.peak_10.subscribe(
#         partial(new_data, in_value=peak_10, in_time=peak_10_TS, temp=peak_10_temp)
#     )
    
    peakDict = {
        'peak_8':peak_8, 
        #'peak_9':peak_9, 
        #'peak_10':peak_10, 
        
    }
    peakTSDict = {
        'peak_8_TS':peak_8_TS, 
        #'peak_9_TS':peak_9_TS, 
        #'peak_10_TS':peak_10_TS, 
        
    }
    medianDict = {
        'peak_8_median':peak_8_median,
    }
    stdevDict = {
        'peak_8_std':peak_8_std,
    }
    median_stdevDict={
        'peak_8_std_median_TS':peak_8_median_TS
    }
    
    data = {
        'peakDict':peakDict,
        'peakTSDict':peakTSDict,
        'medianDict':medianDict,
        'stdevDict':stdevDict,
        'median_stdevDict':median_stdevDict
    }
    
    # Reply with the current data dict whenever a request arrives
    while True:
#         socket.send_pyobj(data)
#         print(len(data['peakTSDict']['peak_8_TS']))
#         time.sleep(1)
        
        message = socket.recv()
        print("Received request: ", message)
        socket.send_pyobj(data)