Example #1
 def _on_actuator_result(self, topic, headers, message, match):
     """lock result"""
     msg = jsonapi.loads(message[0])
     print('Actuator Results:', match, msg)
     if headers['requesterID'] == agent_id:
         if self.actuator_handler is not None:
             self.actuator_handler(match, msg)
Example #2
 def load_config_from_json(self):
     """load config from existing json connector files."""
     c = self.config
     # load from engine config
     with open(os.path.join(self.profile_dir.security_dir, 'ipcontroller-engine.json')) as f:
         cfg = json.loads(f.read())
     key = c.Session.key = asbytes(cfg['exec_key'])
     xport, addr = cfg['url'].split('://')
     c.HubFactory.engine_transport = xport
     ip, ports = addr.split(':')
     c.HubFactory.engine_ip = ip
     c.HubFactory.regport = int(ports)
     self.location = cfg['location']
     if not self.engine_ssh_server:
         self.engine_ssh_server = cfg['ssh']
     # load client config
     with open(os.path.join(self.profile_dir.security_dir, 'ipcontroller-client.json')) as f:
         cfg = json.loads(f.read())
     assert key == cfg['exec_key'], "exec_key mismatch between engine and client keys"
     xport, addr = cfg['url'].split('://')
     c.HubFactory.client_transport = xport
     ip, ports = addr.split(':')
     c.HubFactory.client_ip = ip
     if not self.ssh_server:
         self.ssh_server = cfg['ssh']
     assert int(ports) == c.HubFactory.regport, "regport mismatch"
Example #3
 def on_message(self, message):
     prefix, message = message.split(",", 1)
     kernel, channel = prefix.split("/", 1)
     if channel == "stdin":
         # TODO: Support the stdin channel
         # See http://ipython.org/ipython-doc/dev/development/messaging.html
         return
     try:
         if kernel == "complete":
             application = self.session.handler.application
             message = jsonapi.loads(message)
             if message["header"]["msg_type"] in ("complete_request", "object_info_request"):
                 application.completer.registerRequest(self, message)
         elif kernel not in self.channels:
             # handler may be None in certain circumstances (it seems to only be set
             # in GET requests, not POST requests, so even using it here may
             # only work with JSONP because of a race condition)
             application = self.session.handler.application
             kernel_info = application.km.kernel_info(kernel)
             self.kernel_info = {'remote_ip': kernel_info['remote_ip'],
                                 'referer': kernel_info['referer'],
                                 'timeout': kernel_info['timeout']}
             self.channels[kernel] = \
                 {"shell": ShellSockJSHandler(kernel, self.send, application),
                  "iopub": IOPubSockJSHandler(kernel, self.send, application)}
             self.channels[kernel]["shell"].open(kernel)
             self.channels[kernel]["iopub"].open(kernel)
         if kernel != "complete":
             self._log_stats(kernel, message)
             self.channels[kernel][channel].on_message(message)
     except KeyError:
         # Ignore messages to nonexistent or killed kernels.
         jsonmessage = jsonapi.loads(message)
         logger.info("%s message sent to deleted kernel: %s"
                     % (jsonmessage["header"]["msg_type"], kernel))
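The splits above imply a simple framing convention for incoming messages:
"<kernel>/<channel>,<json>". A minimal, self-contained sketch of that parsing
(the kernel id and payload are made up):

    import json

    raw = 'a1b2c3/shell,{"header": {"msg_type": "execute_request"}}'
    prefix, payload = raw.split(",", 1)
    kernel, channel = prefix.split("/", 1)
    assert (kernel, channel) == ("a1b2c3", "shell")
    assert json.loads(payload)["header"]["msg_type"] == "execute_request"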
Example #4
File: client.py Project: bbinet/circus
    def iter_messages(self):
        """ Yields tuples of (watcher, subtopic, stat)"""
        recv = self.pubsub_socket.recv_multipart
        with self:
            while True:
                try:
                    events = dict(self.poller.poll(self.timeout * 1000))
                except zmq.ZMQError as e:
                    if e.errno == errno.EINTR:
                        continue
                    raise

                if len(events) == 0:
                    continue

                try:
                    topic, stat = recv()
                except zmq.core.error.ZMQError as e:
                    if e.errno != errno.EINTR:
                        raise
                    else:
                        try:
                            sys.exc_clear()
                        except Exception:
                            pass
                        continue

                topic = s(topic).split('.')
                if len(topic) == 3:
                    __, watcher, subtopic = topic
                    yield watcher, subtopic, json.loads(stat)
                elif len(topic) == 2:
                    __, watcher = topic
                    yield watcher, None, json.loads(stat)
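The topic parsing above expects two or three dot-separated parts, with the
stat payload JSON-encoded in the second frame. A small sketch of both shapes
(the "stat" prefix and payload values are illustrative assumptions):

    import json

    for topic, stat in [(b'stat.webapp.1234', b'{"cpu": 1.0}'),
                        (b'stat.webapp', b'{"cpu": 1.0}')]:
        parts = topic.decode().split('.')
        if len(parts) == 3:
            _, watcher, subtopic = parts
        else:
            _, watcher = parts
            subtopic = None
        print(watcher, subtopic, json.loads(stat))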
Example #5
def unpack_legacy_message(headers, message):
    """Unpack legacy pubsub messages for VIP agents.

    Loads JSON-formatted message parts and removes single-frame messages
    from their containing list. Does not alter headers.
    """
    if not isinstance(headers, Headers):
        headers = Headers(headers)
    try:
        content_type = headers["Content-Type"]
    except KeyError:
        return headers, message
    if isinstance(content_type, basestring):
        if content_type.lower() == "application/json":
            if isinstance(message, list) and len(message) == 1:
                return jsonapi.loads(message[0])
            if isinstance(message, basestring):
                return jsonapi.loads(message)
        if isinstance(message, list) and len(message) == 1:
            return message[0]
    if isinstance(content_type, list) and isinstance(message, list):
        parts = [
            (jsonapi.loads(msg) if str(ctype).lower() == "application/json" else msg)
            for ctype, msg in zip(content_type, message)
        ]
        parts.extend(message[len(parts) :])
        if len(parts) == len(content_type) == 1:
            return parts[0]
        return parts
    return message
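A quick sketch of the single-frame JSON case this unpacker handles, assuming
jsonapi is zmq.utils.jsonapi and that Headers accepts a plain mapping (both
as in the snippet above); the header and payload values are made up:

    from zmq.utils import jsonapi

    headers = {'Content-Type': 'application/json'}
    message = [jsonapi.dumps({'temp': 72.5})]
    # unpack_legacy_message(headers, message) -> {'temp': 72.5}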
Example #6
    def capture_data(self, peer, sender, bus, topic, headers, message, device):
        
        timestamp_string = headers.get(headers_mod.DATE)
        timestamp, my_tz = process_timestamp(timestamp_string)
        
        try:
            # 2.0 agents compatibility layer makes sender == pubsub.compat so
            # we can do the proper thing when it is here
            if sender == 'pubsub.compat':
                message = jsonapi.loads(message[0])
                
            if isinstance(message, dict):
                values = message
            else:
                values = message[0]
                
        except ValueError as e:
            _log.error("message for {topic} bad message string: {message_string}".format(topic=topic,
                                                                                     message_string=message[0]))
            return
        except IndexError as e:
            _log.error("message for {topic} missing message string".format(topic=topic))
            return
        except Exception as e:
            _log.exception(e)
            return

        meta = {}
        try:
            # 2.0 agents compatibility layer makes sender == pubsub.compat so
            # we can do the proper thing when it is here
            if sender == 'pubsub.compat':
                if isinstance(message[1], str):
                    meta = jsonapi.loads(message[1])
            
            if not isinstance(message, dict):
                meta = message[1]
                
        except ValueError as e:
            _log.warning("meta data for {topic} bad message string: {message_string}".format(topic=topic,
                                                                                     message_string=message[0]))
        except IndexError as e:
            _log.warning("meta data for {topic} missing message string".format(topic=topic))

        if topic.startswith('analysis'):
            source = 'analysis'
        else:
            source = 'scrape'
        _log.debug("Queuing {topic} from {source} for publish".format(topic=topic,
                                                                      source=source))
        
        for key, value in values.iteritems():
            point_topic = device + '/' + key
            self._event_queue.put({'source': source,
                                   'topic': point_topic,
                                   'readings': [(timestamp,value)],
                                   'meta': meta.get(key,{})})
Example #7
def test_agent_last_update_increases(volttron_instance):
    agent = volttron_instance.build_agent()
    s = json.loads(agent.vip.health.get_status())
    dt = dateparse(s['last_updated'], fuzzy=True)
    agent.vip.health.set_status(STATUS_UNKNOWN, 'Unknown now!')
    gevent.sleep(1)
    s = json.loads(agent.vip.health.get_status())
    dt2 = dateparse(s['last_updated'], fuzzy=True)
    assert dt < dt2
Example #8
 def test_send(self):
     self.publisher.send('test', 'method', '{"temp": 20, "humid": 30}')
     result = self.subscriber.recv()
     self.assertEqual(result[0], b'test') # data_type
     
     msg = json.loads(result[1])
     self.assertEqual(msg[0], 'method')
     
     data = json.loads(msg[1])
     self.assertEqual(data['temp'], 20)
     self.assertEqual(data['humid'], 30)        
Example #9
    def test_call(self):
        self.state_handler.update_sensor({'test_temperature': 22})
        self.state_handler.update_appliance({'viera': 1})        
        self.state_handler.__call__()

        result = self.subscriber.recv()
        
        msg = json.loads(result[1])
        self.assertEqual(msg[0], 'mining')
        
        data = json.loads(msg[1])
        self.assertEqual(data['sensors']['test_temperature'], 22)                
        self.assertEqual(data['appliances']['viera'], 1)
Example #10
def test_agent_status_changes(volttron_instance):
    unknown_message = "This is unknown"
    bad_message = "Bad kitty"
    agent = volttron_instance.build_agent()
    agent.vip.health.set_status(STATUS_UNKNOWN, unknown_message)
    r = json.loads(agent.vip.health.get_status())
    assert unknown_message == r['context']
    assert STATUS_UNKNOWN == r['status']

    agent.vip.health.set_status(STATUS_BAD, bad_message)
    r = json.loads(agent.vip.health.get_status())
    assert bad_message == r['context']
    assert STATUS_BAD == r['status']
Example #11
def pair_recv(msg):
    print("Received from Server:")
    try:
        j = jsonapi.loads(''.join(msg))
    except TypeError:
        j = jsonapi.loads(msg)

    if j['state'] == 'naming':
        #assign_names(j['mapping'])
        forward_from_mesh()
    elif j['state'] == 'commanding':
        send_commands(j['commanding'])
Example #12
    def call(self, cmd):
        if isinstance(cmd, string_types):
            raise DeprecationWarning('call() takes a mapping')

        call_id = uuid.uuid4().hex
        cmd['id'] = call_id
        try:
            cmd = json.dumps(cmd)
        except ValueError as e:
            raise CallError(str(e))

        try:
            yield tornado.gen.Task(self.stream.send, cmd)
        except zmq.ZMQError as e:
            raise CallError(str(e))

        while True:
            messages = yield tornado.gen.Task(self.stream.on_recv)
            for message in messages:
                try:
                    res = json.loads(message)
                    if res.get('id') != call_id:
                        # we got the wrong message
                        continue
                    raise tornado.gen.Return(res)
                except ValueError as e:
                    raise CallError(str(e))
Example #13
 def on_message(self, message):
     prefix, json_message = message.split(",", 1)
     kernel_id = prefix.split("/", 1)[0]
     message = jsonapi.loads(json_message)
     logger.debug("KernelConnection.on_message: %s", message)
     application = self.session.handler.application
     if kernel_id == "complete":
         if message["header"]["msg_type"] in ("complete_request",
                                              "object_info_request"):
             application.completer.registerRequest(self, message)
         return
     try:
         if kernel_id not in self.channels:
             # handler may be None in certain circumstances (it seems to only be set
             # in GET requests, not POST requests, so even using it here may
             # only work with JSONP because of a race condition)
             kernel_info = application.km.kernel_info(kernel_id)
             self.kernel_info = {'remote_ip': kernel_info['remote_ip'],
                                 'referer': kernel_info['referer'],
                                 'timeout': kernel_info['timeout']}
         if message["header"]["msg_type"] == "execute_request":
             stats_logger.info(StatsMessage(
                 kernel_id=kernel_id,
                 remote_ip=self.kernel_info['remote_ip'],
                 referer=self.kernel_info['referer'],
                 code=message["content"]["code"],
                 execute_type='request'))
         if kernel_id not in self.channels:
             self.channels[kernel_id] = SockJSChannelsHandler(self.send)
             self.channels[kernel_id].open(application, kernel_id)
         self.channels[kernel_id].on_message(json_message)
     except KeyError:
         # Ignore messages to nonexistent or killed kernels.
         logger.info("%s message sent to nonexistent kernel: %s" %
                     (message["header"]["msg_type"], kernel_id))
Example #14
 def on_message(self, message):
     if self.km._kernels.get(self.kernel_id) is not None:
         msg = jsonapi.loads(message)
         for f in self.msg_to_kernel_callbacks:
             f(msg)
         self.kernel["executing"] += 1
         self.session.send(self.shell_stream, msg)
Example #15
    def handle_frontend_messages(self, data):
        """
        Handle a message arriving on the frontend stream.

        Frame 0: [ N ][...]  <- Identity of connection
        Frame 1: [ N ][...]  <- Data frame

        """
        logging.debug(
            'Received message on frontend stream: %s',
            data
        )

        if not self.is_valid_message(data):
            logging.warning('Message validation failed')
            return

        identity, msg = data[0], jsonapi.loads(data[1])
        is_command = msg['message'].startswith('/')

        # Everything except for /CONNECT should have a valid identity
        if msg['message'].split()[0] != '/CONNECT':
            if not self.is_valid_identity(who=msg['who'], identity=identity):
                logging.warning('Invalid identity, ignoring message')
                return

        if is_command:
            self.process_command(msg=msg, identity=identity)
        else:
            self.broadcast(**msg)
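The docstring's two-frame layout (identity + data frame) is what a ROUTER
socket delivers for each DEALER peer. A hedged sketch of a client that would
produce it (the endpoint and field values are assumptions):

    import zmq
    from zmq.utils import jsonapi

    ctx = zmq.Context.instance()
    client = ctx.socket(zmq.DEALER)          # ROUTER prepends the identity frame
    client.connect('tcp://localhost:5555')   # hypothetical frontend endpoint
    client.send(jsonapi.dumps({'who': 'alice', 'message': '/CONNECT'}))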
Example #16
    def is_valid_message(self, data):
        """
        Validate an incoming two-frame frontend message.

        """
        logging.debug('Validating message: %s', data)

        if len(data) != 2:
            logging.warning('Expected 2 frames, received: %s', len(data))
            return False

        identity, msg = data
        try:
            msg = jsonapi.loads(msg)
        except (TypeError, ValueError) as e:
            logging.warning('Cannot decode message: %s', e)
            return False

        # Check for required message attributes
        required = ('who', 'message')
        if not all(k in msg for k in required):
            logging.warning('Required attributes missing in message')
            return False

        if not all(msg.get(k) for k in required):
            logging.warning('Empty data in message found')
            return False

        return True
Example #17
    def routerRecv(self, message):
        """
        message = [ ... , request, image/blank]

        request = {'timestamp': timestamp, 
                   'task': 'detection'/'recognition'/'tracking',
                   'parameters': (...)}
        """
        request = loads(message[-2])

        if request["task"] == "detection":
            self.logger.debug("start detection")
            with open("image.jpg", "wb") as f:
                f.write(message[-1])
            sleep = random.randint(1, 2)  # detection
            time.sleep(sleep)
            message[-2] = dumps("detection")
            message[-1] = ""
            self.rtr.send_multipart(message)

        elif request["task"] == "tracking":
            self.logger.debug("prepare to tracking")
            message[-1] = "finish"
            tracker.Tracker(self.rtr, message)
        else:
            self.logger.debug("requested task is not supported")
Example #18
     def handle_new(self, headers, message, now):
         print('handle new')
         requester = headers.get('requesterID')
         self.task_id = headers.get('taskID')
         priority = headers.get('priority')
         
         try:
             requests = jsonapi.loads(message[0])
             requests = requests[0]
         except (ValueError, IndexError) as ex:
             # Could be ValueError or JSONDecodeError depending on
             # whether simplejson was used; JSONDecodeError inherits
             # from ValueError.
             # Let the schedule manager tell us this is a bad request.
             _log.error('bad request: {request}, {error}'.format(request=message, error=str(ex)))
             return
         device, start, end = requests
 
         self.start_time = parser.parse(start, fuzzy=True)
         self.end_time = parser.parse(end, fuzzy=True)
         event = sched.Event(self.announce)
         self.schedule(self.start_time, event)

         topic = topics.ACTUATOR_SCHEDULE_RESULT()
         headers = self.get_headers(requester, task_id=self.task_id)
         headers['type'] = SCHEDULE_ACTION_NEW
         self.publish_json(topic, headers, {'result':'SUCCESS', 
                                            'data': 'NONE', 
                                            'info': 'NONE'})
Example #19
    def handle_recv(self, data):
        """called each time circusd sends an event"""
        # maintains a periodic callback to compute mem and cpu consumption for
        # each pid.
        logger.debug('Received an event from circusd: %s' % str(data))
        topic, msg = data
        try:
            topic = s(topic)
            watcher = topic.split('.')[1:-1][0]
            action = topic.split('.')[-1]
            msg = json.loads(msg)

            if action in ('reap', 'kill'):
                # a process was reaped
                pid = msg['process_pid']
                self.remove_pid(watcher, pid)
            elif action == 'spawn':
                # a process was added
                pid = msg['process_pid']
                self._append_pid(watcher, pid)
            elif action == 'stop':
                # the whole watcher was stopped.
                self.stop_watcher(watcher)
            else:
                logger.debug('Unknown action: %r' % action)
                logger.debug(msg)
        except Exception:
            logger.exception('Failed to handle %r' % msg)
Example #20
        def _on_new_data(self, topic, headers, message, match):
            """watching for new data"""
            data = jsonapi.loads(message[0])
#             self.current_spacetemp = float(data["ZoneTemp"])
            self.current_spacetemp = 76
            droveride = bool(int(data["CoolCall2"]))
            occupied = bool(int(data["Occupied"]))
            
            if droveride and self.state not in ('IDLE', 'CLEANUP', 'STARTUP'):
                print('User Override Initiated')
                self.cancel_event()
            
            if not occupied and self.state in ('DR_EVENT', 'RESTORE'):
                self.cancel_event()
                
            if self.state in ('IDLE', 'STARTUP'):
                #self.default_coolingstpt = float(data["CoolingStPt"])
                #self.default_heatingstpt = float(data["HeatingStPt"])
                self.default_coolingstpt = 75.0
                self.default_heatingstpt = 65.0
                self.default_firststage_fanspeed = float(data["CoolSupplyFanSpeed1"])
                self.default_secondstage_fanspeed = float(data["CoolSupplyFanSpeed2"])
                self.default_damperstpt = float(data["ESMDamperMinPosition"])
                
            if self.state == 'STARTUP':
                self.state = 'IDLE'
Example #21
    def __init__(self, *args, **kwargs):
        super().__init__(*args)

        self.latency_log = None
        if kwargs['latency_debug']:
            self.latency_log = list()

        self.logger = logging.getLogger(logger.name+'.'+self.__class__.__name__)
        self.sock = ctx.socket(zmq.SUB)
        self.sock.subscribe = b''
        self.sock.connect(self.endpoint)
        self.logger.info('Connecting to endpoint %s', self.endpoint)

        msg = self.sock.recv_multipart()
        meta = ChunkMeta(*jsonapi.loads(msg[0]))
        self.logger.info(
            'Detected remote format: chunksize=%d channels=%d srate=%d',
            meta.chunksize, meta.channels, meta.srate)
        self.chunksize = meta.chunksize
        self.channels = meta.channels
        self.srate = meta.srate
        self.seq = meta.seq

        self.chunk_q = queue.Queue(kwargs['max_buffer']//self.chunksize + 1)
        self.ioloop = ioloop.ZMQIOLoop(time_func=time.monotonic)
        self.sockstream = zmqstream.ZMQStream(self.sock, self.ioloop)
        self.sockstream.on_recv(self._enqueue)
        threading.Thread(target=lambda loop: loop.start(), name='AudioOutput.IOLoop',
                         args=(self.ioloop,), daemon=True).start()

        self._open_stream('output')
Example #22
def recv_json(socket, timeout=None, ignore_exiting=False):
    """
    Receives JSON from a socket. Assumes socket is set to timeout properly.
    Raises universal.Exiting if program is exiting, or zmq.ZMQError if
    timed out.

    timeout is in milliseconds

    """

    poller = zmq.Poller()
    poller.register(socket, zmq.POLLIN)

    if timeout is not None:
        start_time = time.time() * 1000

    poll_wait_time = 1000 if timeout is None else min(timeout, 1000)
    while ignore_exiting or not universal.exiting:
        if poller.poll(poll_wait_time):
            msg = socket.recv_multipart()

            # Decode the json in the innermost frame
            msg[-1] = jsonapi.loads(msg[-1])

            # If only one frame was received simply return that frame
            if len(msg) == 1: msg = msg[0]

            return msg
        elif timeout is not None and start_time + timeout <= time.time() * 1000:
            raise Timeout()

    raise universal.Exiting()
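A minimal usage sketch, assuming a PULL socket and a peer that sends a
single JSON frame (the endpoint is made up):

    import zmq

    ctx = zmq.Context.instance()
    sock = ctx.socket(zmq.PULL)
    sock.bind('tcp://*:5556')               # hypothetical endpoint
    # msg = recv_json(sock, timeout=1000)   # decoded object, or raises Timeout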
Example #23
 def _update_entry(self, frames):
     """
     NOT USED - FOR FUTURE
     Update routing table entries.
     :param frames:
     :return:
     """
     if len(frames) > 6:
         sender = bytes(frames[0])
         routing_table = bytes(frames[7])
         routing_table = jsonapi.loads(routing_table)
         _log.debug("ROUTING SERVICE Ext routing TABLE: {0}, MY {1} ".format(routing_table, self._routing_table))
         for vip_id in routing_table:
             if vip_id in self._routing_table:
                 if vip_id != self._my_vip_id:
                     my_route_list = self._routing_table[vip_id]
                     if 0 < len(routing_table[vip_id]) < len(my_route_list):
                         # list.extend() returns None, so build the list first
                         my_route_list = [sender]
                         my_route_list.extend(routing_table[vip_id])
                         self._routing_table[vip_id] = my_route_list
             else:
                 route_list = [sender]
                 route_list.extend(routing_table[vip_id])
                 self._routing_table[vip_id] = route_list
         _log.debug("ROUTING SERVICE my routing TABLE: {} ".format(self._routing_table))
         return True
     else:
         return False
Example #24
 def recv_message(self, flags=0):
     '''Receive a message as a (topic, headers, message) tuple.'''
     topic = self.recv_string(flags)
     headers = self.recv_string(flags) if self.rcvmore else ''
     headers = jsonapi.loads(headers) if headers else {}
     message = self.recv_multipart(flags) if self.rcvmore else []
     return topic, Headers(headers), message
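A plausible sending counterpart for this receiver, assuming the wire format
is a topic frame, a JSON-encoded headers frame, then zero or more message
frames (inferred from the recv calls above):

    import zmq
    from zmq.utils import jsonapi

    def send_message(sock, topic, headers, *parts):
        # mirror recv_message: topic, then headers as JSON, then the parts
        sock.send_string(topic, zmq.SNDMORE)
        sock.send(jsonapi.dumps(dict(headers)), zmq.SNDMORE if parts else 0)
        for i, part in enumerate(parts):
            sock.send(part, zmq.SNDMORE if i < len(parts) - 1 else 0)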
Example #25
    def __call__(self, _, nframes, time_info, status):
        if status != 0:
            if status & 5:
                self.logger.warning('Status: underflow')
            elif status & 10:
                self.logger.warning('Status: overflow')

        msg = self.chunk_q.get()
        if len(msg[1]) == 0:
            self.sockstream.stop_on_recv()
            self.remake.set()
            return (b'', 2)
        meta = ChunkMeta(*jsonapi.loads(msg[0]))

        if (meta.chunksize != self.chunksize) or \
           (meta.channels != self.channels) or \
           (meta.srate != self.srate):
            self.logger.info('Remote format changed. Signaling for restart.')
            self.sockstream.stop_on_recv()
            self.remake.set()
            return (b'', 2)

        if meta.seq != self.seq + 1:
            self.logger.warning('Sequence break: expected %d got %d', self.seq+1, meta.seq)
        self.seq = meta.seq

        if self.latency_log is not None:
            self.latency_log.append(time_info['output_buffer_dac_time'] - meta.adc_time)
            if len(self.latency_log)*self.chunksize >= self.srate*5:
                self.ioloop.add_callback(self._latency_report, self.latency_log)
                self.latency_log = list()
        data = np.frombuffer(msg[1], dtype=meta.dtype).reshape((-1, meta.channels))
        return (data.astype(np.float32), 0)
Example #26
 def on_message(self, message):
     prefix, message = message.split(",", 1)
     id = prefix.split("/", 1)[0]
     message = jsonapi.loads(message)
     logger.debug("SockJSHandler.on_message: %s", message)
     msg_type = message["header"]["msg_type"]
     app = self.session.handler.application
     if id == "complete":
         if msg_type in ("complete_request", "object_info_request"):
             app.completer.registerRequest(self, message)
         return
     try:
         kernel = app.kernel_dealer.kernel(id)
     except KeyError:
         # Ignore messages to nonexistent or killed kernels.
         logger.warning("%s sent to nonexistent kernel %s", msg_type, id)
         return
     if id not in self.channels:
         self.channels[id] = SockJSChannelsHandler(self.send)
         self.channels[id].connect(kernel)
     if msg_type == "execute_request":
         stats_logger.info(StatsMessage(
             kernel_id=id,
             remote_ip=kernel.remote_ip,
             referer=kernel.referer,
             code=message["content"]["code"],
             execute_type="request"))
     self.channels[id].send(message)
Example #27
 def schedule_result(self, topic, headers, message, match):
     msg = jsonapi.loads(message[0])
     self._log.info('Schedule Request Acknowledged')
     self.task_timer.cancel()
     task_id = headers.get('taskID', 0)
     response_type = headers.get('type', 0)
     schedule_start = self.device_schedule[task_id]["schedule_start"]
     event_start = schedule_start + datetime.timedelta(minutes = 1)
     schedule_end = self.device_schedule[task_id]["schedule_end"]
     e_start = self.device_schedule[task_id]["event_start"]
     e_end = self.device_schedule[task_id]["event_end"]
     
     if response_type == 'NEW_SCHEDULE' and self.error_handler is None:
         if msg.get('result', 0) == 'SUCCESS':
             event = sched.Event(self.pre_cool_setup, args=[e_start, e_end])
             self.schedule(event_start, event)
             self.all_scheduled_events[e_start] = event
         elif msg.get('result', 0) == 'FAILURE' and schedule_start < schedule_end:
             schedule_start = schedule_start + datetime.timedelta(minutes=10)
             headers = {
                 'type': 'NEW_SCHEDULE',
                 'requesterID': agent_id,
                 'taskID': task_id,
                 'priority': 'High'
             }
             self.task_timer = self.periodic_timer(
                 20, self.publish_json, topics.ACTUATOR_SCHEDULE_REQUEST(), headers,
                 [["{campus}/{building}/{unit}".format(**rtu_path),
                   str(schedule_start), schedule_end]])
         elif schedule_start >= schedule_end:
             return
     if self.error_handler is not None:
         self.error_handler()
Example #28
    def test_connect(self):
        """Tests that we can connect and send data to a zmq puller"""

        # start dummy ZMQ pull server
        gevent.spawn(self._start_zmq_puller)
        self.zmq_server_listning_event.wait(5)

        # our local zmq logger
        zmq_url = 'tcp://localhost:{0}'.format(self.zmq_tcp_port)
        client_public_key = "N[DC7+%FKdW3pJUPnaCwWxt-0/jo5Lrq&U28-GG}"
        client_secret_key = "Gwt%C0a8J/:9Jy$qpDNTy8wRzlnRD-HT8H>u7F{B"
        server_public_key = "^4b:-bZ8seRC+m2p(sg{7{skOuK*jInNeH^/Le}Q"
        zmqLogger = ZmqLogger(zmq_url, client_public_key, client_secret_key, server_public_key)
        zmqLogger.start()

        # inject some data into the logging relay singleton
        self.reportingRelay.queueLogData({'somekey': 'somedata'})

        # wait until the zmq server put something into the local testing queue
        received_data = self.testing_queue.get(timeout=5)
        received_data = received_data.split(' ', 1)
        topic, message = received_data[0], jsonapi.loads(received_data[1])

        self.assertEqual(topic, ZmqMessageTypes.HERALDING_AUTH_LOG.value)
        self.assertIn('somekey', message)
        self.assertEqual(message['somekey'], 'somedata')
Example #29
    def update_override_patterns(self):
        if self._override_patterns is None:
            try:
                values = self.vip.config.get("override_patterns")
                values = jsonapi.loads(values)

                if isinstance(values, dict):
                    self._override_patterns = set()
                    for pattern, end_time in values.items():
                        # check the end_time
                        now = utils.get_aware_utc_now()
                        # If end time is indefinite, set override with indefinite duration
                        if end_time == "0.0":
                            self._set_override_on(pattern, 0.0, from_config_store=True)
                        else:
                            end_time = utils.parse_timestamp_string(end_time)
                            # If end time > current time, set override with new duration
                            if end_time > now:
                                delta = end_time - now
                                self._set_override_on(pattern, delta.total_seconds(), from_config_store=True)
                else:
                    self._override_patterns = set()
            except KeyError:
                self._override_patterns = set()
            except ValueError:
                _log.error("Override patterns is not set correctly in config store")
                self._override_patterns = set()
Example #30
 def _on_error_result(self, topic, headers, message, match):
     """ERROR result"""
     point = match.group(1)
     msg = jsonapi.loads(message[0])
     today = datetime.datetime.now().date()
     for key,schedule in self.device_schedule.items():
         if schedule["date"] == today:
             schedule_start = schedule["schedule_start"]
             schedule_end = schedule["schedule_end"]
             task_id = key
             break
                
     self._log.info('Error Results: ' + str(point) + '  ' + str(msg))
     if msg.get('type', 0) == 'LockError':
         headers = {
             'type': 'NEW_SCHEDULE',
             'requesterID': agent_id,
             'taskID': task_id,
             'priority': 'HIGH'
         }
         self.task_timer = self.periodic_timer(
             20, self.publish_json, topics.ACTUATOR_SCHEDULE_REQUEST(), headers,
             [["{campus}/{building}/{unit}".format(**rtu_path),
               str(schedule_start), str(schedule_end)]])
         
     elif self.error_handler is not None:
         self._log.info('Running error handler')
         self.error_handler()
Example #31
 def _on_new_data(self, topic, headers, message, match):
     """watching for new data"""
     data = jsonapi.loads(message[0])
     self.data_queue.notify_all(data)
Example #32
    def query(self,
              topic,
              start=None,
              end=None,
              skip=0,
              count=None,
              order="FIRST_TO_LAST"):
        """This function should return the results of a query in the form:
        {"values": [(timestamp1, value1), (timestamp2, value2), ...],
         "metadata": {"key1": value1, "key2": value2, ...}}

         metadata is not required (The caller will normalize this to {} for you)
        """
        query = '''SELECT data.ts, data.value_string
                   FROM data, topics
                   {where}
                   {order_by}
                   {limit}
                   {offset}'''

        where_clauses = [
            "WHERE topics.topic_name = %s", "topics.topic_id = data.topic_id"
        ]
        args = [topic]

        if start is not None:
            where_clauses.append("data.ts > %s")
            args.append(start)

        if end is not None:
            where_clauses.append("data.ts < %s")
            args.append(end)

        where_statement = ' AND '.join(where_clauses)

        order_by = 'ORDER BY data.ts ASC'
        if order == 'LAST_TO_FIRST':
            order_by = ' ORDER BY data.ts DESC'

        # Can't have an offset without a limit.
        # -1 = no limit, which allows the user to provide just an offset.
        if count is None:
            count = 100

        limit_statement = 'LIMIT %s'
        args.append(count)

        offset_statement = ''
        if skip > 0:
            offset_statement = 'OFFSET %s'
            args.append(skip)

        _log.debug("About to do real_query")

        real_query = query.format(where=where_statement,
                                  limit=limit_statement,
                                  offset=offset_statement,
                                  order_by=order_by)
        _log.debug("Real Query: " + real_query)
        _log.debug("args: " + str(args))

        conn = self.connect()
        cur = conn.cursor()
        cur.execute(real_query, args)
        rows = cur.fetchall()
        if rows:
            values = [(ts.isoformat(), jsonapi.loads(value))
                      for ts, value in rows]
        else:
            values = []
        cur.close()
        conn.close()
        return {'values': values}
Example #33
 def schedule_result(self, topic, headers, message, match):
     msg = jsonapi.loads(message[0])
     _log.debug('Actuator response received')
     self.task_timer.cancel()
Example #34
#-----------------------------------------------------------------------------

# default values for the thresholds:
MAX_ITEMS = 64
MAX_BYTES = 1024

# ISO8601-ify datetime objects
# allow unicode
# disallow nan, because it's not actually valid JSON
json_packer = lambda obj: jsonapi.dumps(
    obj,
    default=date_default,
    ensure_ascii=False,
    allow_nan=False,
)
json_unpacker = lambda s: jsonapi.loads(s)

pickle_packer = lambda o: pickle.dumps(squash_dates(o), PICKLE_PROTOCOL)
pickle_unpacker = pickle.loads

default_packer = json_packer
default_unpacker = json_unpacker

DELIM = b"<IDS|MSG>"
# singleton dummy tracker, which will always report as done
DONE = zmq.MessageTracker()

#-----------------------------------------------------------------------------
# Mixin tools for apps that use Sessions
#-----------------------------------------------------------------------------
Example #35
    def _run(self, _, frontend, sink, *backend_socks):
        def push_new_job(_job_id, _json_msg, _msg_len):
            # backend_socks[0] is always at the highest priority
            _sock = backend_socks[
                0] if _msg_len <= self.args.priority_batch_size else rand_backend_socket
            _sock.send_multipart([_job_id, _json_msg])

        # bind all sockets
        self.logger.info('bind all sockets')
        frontend.bind('tcp://*:%d' % self.port)
        addr_front2sink = auto_bind(sink)
        addr_backend_list = [auto_bind(b) for b in backend_socks]
        self.logger.info('open %d ventilator-worker sockets' %
                         len(addr_backend_list))

        # start the sink process
        self.logger.info('start the sink')
        proc_sink = BertSink(self.args, addr_front2sink)
        self.processes.append(proc_sink)
        proc_sink.start()
        addr_sink = sink.recv().decode('ascii')

        # start the backend processes
        device_map = self._get_device_map()
        for idx, device_id in enumerate(device_map):
            process = BertWorker(idx, self.args, addr_backend_list, addr_sink,
                                 device_id, self.graph_path)
            self.processes.append(process)
            process.start()

        rand_backend_socket = None
        server_status = ServerStatistic()
        while True:
            try:
                request = frontend.recv_multipart()
                client, msg, req_id, msg_len = request
            except ValueError:
                self.logger.error(
                    'received a wrongly-formatted request (expected 4 frames, got %d)'
                    % len(request))
                self.logger.error('\n'.join('field %d: %s' % (idx, k)
                                            for idx, k in enumerate(request)),
                                  exc_info=True)
            else:
                server_status.update(request)
                if msg == ServerCommand.terminate:
                    break
                elif msg == ServerCommand.show_config:
                    self.logger.info(
                        'new config request\treq id: %d\tclient: %s' %
                        (int(req_id), client))
                    status_runtime = {
                        'client': client.decode('ascii'),
                        'num_process': len(self.processes),
                        'ventilator -> worker': addr_backend_list,
                        'worker -> sink': addr_sink,
                        'ventilator <-> sink': addr_front2sink,
                        'server_current_time': str(datetime.now()),
                        'statistic': server_status.value,
                        'device_map': device_map,
                        'num_concurrent_socket': self.num_concurrent_socket
                    }

                    sink.send_multipart([
                        client, msg,
                        jsonapi.dumps({
                            **status_runtime,
                            **self.status_args,
                            **self.status_static
                        }), req_id
                    ])
                else:
                    self.logger.info(
                        'new encode request\treq id: %d\tsize: %d\tclient: %s'
                        % (int(req_id), int(msg_len), client))
                    # register a new job at sink
                    sink.send_multipart(
                        [client, ServerCommand.new_job, msg_len, req_id])

                    # renew the backend socket to prevent large jobs queueing up
                    # [0] is reserved for high-priority jobs
                    # the last-used backend shouldn't be selected either, as it may be queued up already
                    rand_backend_socket = random.choice([
                        b for b in backend_socks[1:]
                        if b != rand_backend_socket
                    ])

                    # push a new job, note super large job will be pushed to one socket only,
                    # leaving other sockets free
                    job_id = client + b'#' + req_id
                    if int(msg_len) > self.max_batch_size:
                        seqs = jsonapi.loads(msg)
                        job_gen = ((job_id + b'@%d' % i,
                                    seqs[i:(i + self.max_batch_size)]) for i in
                                   range(0, int(msg_len), self.max_batch_size))
                        for partial_job_id, job in job_gen:
                            push_new_job(partial_job_id, jsonapi.dumps(job),
                                         len(job))
                    else:
                        push_new_job(job_id, msg, int(msg_len))

        self.logger.info('terminated!')
Example #36
 def load_message(msg):
     return json.loads(msg)
Example #37
if __name__ == "__main__":
    #signal.signal(signal.SIGTERM, exitHandler)
    signal.signal(signal.SIGINT, exitHandler)
    signal.signal(signal.SIGQUIT, exitHandler)

    logging.info("%s started" % os.path.basename(sys.argv[0]))

    if len(sys.argv) > 1:
        uwscfg = UWSConfig(sys.argv[1])
    else:
        uwscfg = UWSConfig()

    try:
        with open(uwscfg.json_moveto_path, "rb") as fh:
            sensor_store_ = json.loads(fh.read())
    except Exception as e:
        logging.debug(e)
        try:
            with open(uwscfg.json_backup_path, "rb") as fh:
                sensor_store_ = json.loads(fh.read())
        except Exception as e:
            logging.debug(e)

    for k in set(sensor_store_.keys()).difference(
            set(uwscfg.zmq_subscribe.split(" "))):
        # del old sensordata of sensor we do not subscribe to
        del sensor_store_[k]

    for k in sensor_store_.keys():
        try:
Example #38
File: control.py Project: monarin/lcls2
def main():

    # Process arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('pvbase', help='EPICS PV base (e.g. DAQ:LAB2:PART:2)')
    parser.add_argument('-p',
                        type=int,
                        choices=range(0, 8),
                        default=0,
                        help='platform (default 0)')
    parser.add_argument('-C',
                        metavar='CM_HOST',
                        default='localhost',
                        help='Collection Manager host')
    parser.add_argument('-u',
                        metavar='UNIQUE_ID',
                        default='control',
                        help='Name')
    parser.add_argument('-v', action='store_true', help='be verbose')
    args = parser.parse_args()

    if args.v:
        logging.basicConfig(level=logging.DEBUG,
                            format='%(asctime)s - %(levelname)s - %(message)s')
    else:
        logging.basicConfig(level=logging.WARNING,
                            format='%(asctime)s - %(levelname)s - %(message)s')

    logging.info('control level starting')

    ctx = zmq.Context()

    coll = Collection(ctx, args.C, args.p)

    pybody = {}
    pybody['host'] = gethostname()
    pybody['pid'] = getpid()
    idbody = {}
    idbody['procInfo'] = pybody
    mainbody = {}
    mainbody['control'] = idbody
    hellomsg = CollectMsg(key=CollectMsg.HELLO, body=json.dumps(mainbody))
    partition = coll.partitionInfo(hellomsg)
    pprint.pprint(json.loads(partition.body))

    # set up our end of connections, potentially based on the information
    # about who is in the partition (e.g. number of eb/drp nodes)
    # control sockets (ephemeral ports)
    control_router_socket = ctx.socket(zmq.ROUTER)
    control_pull_socket = ctx.socket(zmq.PULL)
    control_router_socket.bind("tcp://*:*")
    control_pull_socket.bind("tcp://*:*")
    control_router_port = Collection.parse_port(
        control_router_socket.getsockopt(zmq.LAST_ENDPOINT))
    control_pull_port = Collection.parse_port(
        control_pull_socket.getsockopt(zmq.LAST_ENDPOINT))
    logging.debug('control_router_port = %d' % control_router_port)
    logging.debug('control_pull_port = %d' % control_pull_port)

    pybody = {}
    pybody['router_port'] = {
        'adrs': gethostname(),
        'port': control_router_port
    }
    pybody['pull_port'] = {'adrs': gethostname(), 'port': control_pull_port}
    connbody = {}
    connbody['connectInfo'] = pybody
    mainbody = {}
    mainbody['control'] = connbody

    portsmsg = CollectMsg(key=CollectMsg.CONNECTINFO,
                          body=json.dumps(mainbody))
    connect_info = coll.connectionInfo(portsmsg)
    pprint.pprint(json.loads(connect_info.body))

    # now make the connections and report to CM when done

    # Control state
    yy = ControlStateMachine(args.pvbase)
    logging.debug("ControlStateMachine state: %s" % yy.state())

    poller = zmq.Poller()
    poller.register(control_router_socket, zmq.POLLIN)
    poller.register(control_pull_socket, zmq.POLLIN)
    try:
        while True:
            items = dict(poller.poll(1000))

            # Handle control_pull_socket socket
            if control_pull_socket in items:
                msg = control_pull_socket.recv()
                config = dgram.Dgram(view=msg)
                # now it's in dgram.Dgram object
                ttt = config.seq.timestamp()
                print('Timestamp:', ttt)  # FIXME

            # Execute state command request
            if control_router_socket in items:
                msg = control_router_socket.recv_multipart()
                identity = msg[0]
                request = msg[1]
                logging.debug('Received <%s> from control_router_socket' %
                              request.decode())

                if request == ControlMsg.PING:
                    # Send reply to client
                    logging.debug("Sending <PONG> reply")
                    control_router_socket.send(identity, zmq.SNDMORE)
                    cmmsg = ControlMsg(key=ControlMsg.PONG)
                    cmmsg.send(control_router_socket)
                    continue

                if request == ControlMsg.PONG:
                    continue

                if request in [
                        Transition.configure, Transition.beginrun,
                        Transition.enable, Transition.disable,
                        Transition.endrun, Transition.unconfigure,
                        ControlMsg.GETSTATE
                ]:

                    if request != ControlMsg.GETSTATE:
                        oldstate = yy.state()
                        # Do transition
                        yy.on_transition(request)
                        newstate = yy.state()
                        if newstate != oldstate:
                            logging.debug("ControlStateMachine state: %s" %
                                          newstate)

                    # Send reply to client
                    control_router_socket.send(identity, zmq.SNDMORE)
                    cmmsg = ControlMsg(key=yy.state().key())
                    cmmsg.send(control_router_socket)
                    continue

                else:
                    logging.warning("Unknown msg <%s>" % request.decode())
                    # Send reply to client
                    logging.debug("Sending <HUH?> reply")
                    control_router_socket.send(identity, zmq.SNDMORE)
                    cmmsg = ControlMsg(key=ControlMsg.HUH)
                    cmmsg.send(control_router_socket)
                    continue

    except KeyboardInterrupt:
        logging.debug("Interrupt received")

    # Clean up
    logging.debug("Clean up control level")

    # Close all sockets associated with this context, and then
    # terminate the context.
    ctx.destroy(0)

    logging.info('control level exiting')
Example #39
    def _run(self, receiver, frontend, sender):
        receiver_addr = auto_bind(receiver)
        frontend.connect(self.front_sink_addr)
        sender.bind('tcp://*:%d' % self.port)

        pending_jobs = defaultdict(
            lambda: SinkJob(self.max_seq_len, self.max_position_embeddings,
                            self.show_tokens_to_client, self.fixed_embed_length
                            ))  # type: Dict[str, SinkJob]

        poller = zmq.Poller()
        poller.register(frontend, zmq.POLLIN)
        poller.register(receiver, zmq.POLLIN)

        # send worker receiver address back to frontend
        frontend.send(receiver_addr.encode('ascii'))

        # Windows does not support logger in MP environment, thus get a new logger
        # inside the process for better compatibility
        logger = set_logger(colored('SINK', 'green'), self.verbose)
        logger.info('ready')
        self.is_ready.set()

        while not self.exit_flag.is_set():
            socks = dict(poller.poll())
            if socks.get(receiver) == zmq.POLLIN:
                msg = receiver.recv_multipart()
                job_id = msg[0]
                # parsing job_id and partial_id
                job_info = job_id.split(b'@')
                job_id = job_info[0]
                partial_id = int(job_info[1]) if len(job_info) == 2 else 0

                if msg[3] == ServerCmd.data_embed:
                    # parsing the ndarray
                    arr_info, arr_val = jsonapi.loads(msg[1]), msg[2]
                    x = np.frombuffer(memoryview(arr_val),
                                      dtype=arr_info['dtype']).reshape(
                                          arr_info['shape'])
                    pending_jobs[job_id].add_embed(x, partial_id)
                elif msg[3] == ServerCmd.data_token:
                    x = jsonapi.loads(msg[1])
                    pending_jobs[job_id].add_token(x, partial_id)
                else:
                    logger.error(
                        'received a wrongly-formatted request (expected 4 frames, got %d)'
                        % len(msg))
                    logger.error('\n'.join('field %d: %s' % (idx, k)
                                           for idx, k in enumerate(msg)),
                                 exc_info=True)

                logger.info(
                    'collect %s %s (E:%d/T:%d/A:%d)' %
                    (msg[3], job_id, pending_jobs[job_id].progress_embeds,
                     pending_jobs[job_id].progress_tokens,
                     pending_jobs[job_id].checksum))

            if socks.get(frontend) == zmq.POLLIN:
                client_addr, msg_type, msg_info, req_id = frontend.recv_multipart(
                )
                if msg_type == ServerCmd.new_job:
                    job_info = client_addr + b'#' + req_id
                    # register a new job
                    pending_jobs[job_info].checksum = int(msg_info)
                    logger.info('job register\tsize: %d\tjob id: %s' %
                                (int(msg_info), job_info))
                    if (len(pending_jobs[job_info]._pending_embeds) > 0
                            and pending_jobs[job_info].final_ndarray is None):
                        pending_jobs[job_info].add_embed(None, 0)
                elif msg_type in (ServerCmd.show_config, ServerCmd.show_status):
                    # dirty fix for the slow-joiner problem: sleep so that the
                    # client receiver can connect
                    time.sleep(0.1)
                    logger.info('send config\tclient %s' % client_addr)
                    sender.send_multipart([client_addr, msg_info, req_id])

            # check if there are finished jobs, then send it back to workers
            finished = [(k, v) for k, v in pending_jobs.items() if v.is_done]
            for job_info, tmp in finished:
                client_addr, req_id = job_info.split(b'#')
                x, x_info = tmp.result
                sender.send_multipart([client_addr, x_info, x, req_id])
                logger.info('send back\tsize: %d\tjob id: %s' %
                            (tmp.checksum, job_info))
                # release the job
                tmp.clear()
                pending_jobs.pop(job_info)
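A hypothetical worker-side send matching the four-frame layout parsed above,
[job_id, arr_info_json, arr_bytes, command]; ServerCmd is the project's
command enum seen in the snippet, and the job id and array are made up:

    import numpy as np
    from zmq.utils import jsonapi

    x = np.zeros((2, 3), dtype='float32')
    frames = [b'client-0#1',
              jsonapi.dumps({'dtype': str(x.dtype), 'shape': x.shape}),
              x.tobytes(),
              ServerCmd.data_embed]  # command constant from the project above
    # worker_sock.send_multipart(frames)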
Example #40
 def get_server_config(self):
     self.socket.send(b'SHOW_CONFIG')
     response = self.socket.recv_multipart()
     print('the server at %s:%d returns the following config:' % (self.ip, self.port))
     for k, v in jsonapi.loads(response[0]).items():
         print('%30s\t=\t%-30s' % (k, v))
Example #41
File: agent.py Project: miraabid/bemoss
 def _read_store(self):
     if os.path.exists('email.store'):
         with open('email.store', 'r') as f:
             self._sent_emails = jsonapi.loads(f.read())
     else:
         self._sent_emails = {}
Example #42
 def weather_response(self, topic, headers, message, match):
     data = float(jsonapi.loads(message[0]))
     print(data)
     self.weather_data_queue.notify_all(data)
Example #43
 def __overide(self, topic, headers, message, match):
     """watch for override from controller"""
     data = jsonapi.loads(message[0])
     if not bool(data):
         self.tasklet = greenlet.greenlet(self.__on_override)
         self.tasklet.switch()
Example #44
 def _recv_ndarray(self):
     request_id, response = self._recv()
     arr_info, arr_val = jsonapi.loads(response[1]), response[2]
     X = np.frombuffer(_buffer(arr_val), dtype=str(arr_info['dtype']))
     return Response(request_id,
                     self.formatter(X.reshape(arr_info['shape'])))
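The inverse of this receive path, a sketch of how a server might frame an
ndarray so dtype and shape travel as JSON beside the raw buffer (the frame
order is inferred from response[1] and response[2] above):

    import numpy as np
    from zmq.utils import jsonapi

    X = np.arange(6, dtype='float32').reshape(2, 3)
    arr_info = jsonapi.dumps({'dtype': str(X.dtype), 'shape': X.shape})
    # socket.send_multipart([request_id_frame, arr_info, X.tobytes()])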
Example #45
    def post(self):
        if 'Origin' in self.request.headers:
            self.set_header('Access-Control-Allow-Origin',
                            self.request.headers['Origin'])
            self.set_header('Access-Control-Allow-Credentials', 'true')
        if (config.get('requires_tos')
                and self.get_argument('accepted_tos', 'false') != 'true'):
            self.set_status(403)
            self.finish(
                'When evaluating code, you must acknowledge your acceptance '
                'of the terms of service at /static/tos.html by passing the '
                'parameter accepted_tos=true\n')
            return
        code = ''.join(self.get_arguments('code', strip=False))
        if len(code) > 65000:
            self.set_status(413)
            self.finish('Max code size is 65000 characters')
            return
        remote_ip = self.request.remote_ip
        referer = self.request.headers.get('Referer', '')
        self.kernel = yield tornado.gen.Task(
            self.application.kernel_dealer.get_kernel,
            rlimits=config.get("provider_settings")["preforked_rlimits"],
            lifespan=config.get("max_lifespan"),
            timeout=0)
        sm = StatsMessage(kernel_id=self.kernel.id,
                          remote_ip=remote_ip,
                          referer=referer,
                          code=code,
                          execute_type='service')
        if remote_ip == '127.0.0.1':
            stats_logger.debug(sm)
        else:
            stats_logger.info(sm)
        self.zmq_handler = ZMQServiceHandler()
        streams = self.zmq_handler.streams
        self.zmq_handler.connect(self.kernel)
        loop = tornado.ioloop.IOLoop.instance()

        def kernel_callback(msg):
            if msg['msg_type'] == 'execute_reply':
                loop.remove_timeout(self.timeout_handle)
                streams['success'] = msg['content']['status'] == 'ok'
                streams['execute_reply'] = msg['content']
            if self.kernel.status == "idle" and 'success' in streams:
                logger.debug('service request finished for %s', self.kernel.id)
                loop.add_callback(self.finish_request)

        self.zmq_handler.msg_from_kernel_callbacks.append(kernel_callback)

        def timeout_callback():
            logger.debug('service request timed out for %s', self.kernel.id)
            self.kernel.stop()
            self.zmq_handler.streams['success'] = False
            loop.add_callback(self.finish_request)

        self.timeout_handle = loop.call_later(30, timeout_callback)
        exec_message = {
            'channel': 'shell',
            'parent_header': {},
            'header': {
                'msg_id': str(uuid.uuid4()),
                'username': '',
                'session': self.kernel.id,
                'msg_type': 'execute_request',
            },
            'content': {
                'code': code,
                'silent': False,
                'user_expressions':
                    jsonapi.loads(self.get_argument('user_expressions', '{}')),
                'allow_stdin': False,
            },
            'metadata': {},
        }
        self.zmq_handler.send(exec_message)
Example #46
    def _run(self, outputs, inputs, *receivers):
        # Windows does not support logger in MP environment, thus get a new logger
        # inside the process for better compatibility
        logger = set_logger(colored('WORKER-%d' % self.worker_id, 'yellow'))

        logger.info(
            'use device %s' %
            ('cpu' if self.device_id < 0 else 'gpu: %d' % self.device_id))

        poller = zmq.Poller()
        for sock, addr in zip(receivers, self.worker_address):
            sock.connect(addr)
            poller.register(sock, zmq.POLLIN)

        outputs.connect(self.sink_address)
        inputs.connect(self.sink_address)

        logger.info('ready and listening!')
        self.is_ready.set()

        while not self.exit_flag.is_set():
            events = dict(poller.poll())
            for sock_idx, sock in enumerate(receivers):
                if sock in events:
                    client_id, msg_type, raw_msg = sock.recv_multipart()
                    msg = jsonapi.loads(raw_msg)

                    if msg_type == ServerCmd.initiate:
                        self.model = SequenceTaggingModel()
                        self.modelid = str(msg)
                        model_name = 'model' + self.modelid
                        if (os.path.isfile(os.path.join('.', model_name + '.pre')) and
                                os.path.isfile(os.path.join('.', model_name + '.pt'))):
                            self.model.load(model_name)
                            logger.info(
                                'new job\tsocket: %d\tsize: %d\tclient: %s' %
                                (sock_idx, 1, client_id))
                            helper.send_test(outputs, client_id,
                                             b'Model Loaded', ServerCmd.load)
                            logger.info('job done\tsize: %s\tclient: %s' %
                                        (1, client_id))
                        else:
                            logger.info(
                                'new job\tsocket: %d\tsize: %d\tclient: %s' %
                                (sock_idx, 1, client_id))
                            helper.send_test(outputs, client_id,
                                             b'Model Initiated', msg_type)
                            logger.info('job done\tsize: %s\tclient: %s' %
                                        (1, client_id))

                    elif msg_type == ServerCmd.online_initiate:
                        self.model.online_word_build(
                            msg[0], msg[1]
                        )  # whole unlabeled training sentences / predefined_labels
                        logger.info(
                            'new job\tsocket: %d\tsize: %d\tclient: %s' %
                            (sock_idx, len(msg[0]), client_id))
                        helper.send_test(outputs, client_id,
                                         b'Online word build completed',
                                         msg_type)
                        logger.info('job done\tsize: %s\tclient: %s' %
                                    (len(msg[0]), client_id))

                    elif msg_type == ServerCmd.online_learning:
                        self.model.online_learning(msg[0], msg[1], msg[2],
                                                   msg[3])
                        self.model.save('model' + self.modelid)
                        logger.info(
                            'new job\tsocket: %d\tsize: %d\tclient: %s' %
                            (sock_idx, len(msg[0]), client_id))
                        helper.send_test(outputs, client_id,
                                         b'Online learning completed',
                                         msg_type)
                        logger.info('job done\tsize: %s\tclient: %s' %
                                    (len(msg[0]), client_id))

                    elif msg_type == ServerCmd.predict:
                        analyzed_result = self.model.analyze(msg)
                        logger.info(
                            'new job\tsocket: %d\tsize: %d\tclient: %s' %
                            (sock_idx, 1, client_id))
                        helper.send_test(outputs, client_id,
                                         jsonapi.dumps(analyzed_result),
                                         msg_type)
                        logger.info('job done\tsize: %s\tclient: %s' %
                                    (1, client_id))

                    elif msg_type == ServerCmd.active_learning:
                        indices, scores = self.model.active_learning(
                            msg[0], msg[1])
                        json_indices = list(map(int, indices))
                        json_scores = list(map(float, scores))
                        active_data = {
                            'indices': json_indices,
                            'scores': json_scores,
                        }
                        logger.info(
                            'new job\tsocket: %d\tsize: %d\tclient: %s' %
                            (sock_idx, len(msg[0]), client_id))
                        helper.send_test(outputs, client_id,
                                         jsonapi.dumps(active_data), msg_type)
                        logger.info('job done\tsize: %s\tclient: %s' %
                                    (len(msg[0]), client_id))
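One detail worth noting in the active_learning branch: numpy scalars are not JSON-serializable, which is why indices and scores are coerced to built-in int/float before jsonapi.dumps. A minimal illustration:

import numpy as np
from zmq.utils import jsonapi  # assumption: same jsonapi as the worker above

indices = np.array([2, 0, 1])
# jsonapi.dumps({'indices': list(indices)}) would fail on np.int64 values,
# so coerce to built-in ints first:
payload = jsonapi.dumps({'indices': list(map(int, indices))})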
Example #47
def convertToPython(content):
    # return session.json_unpacker(content)
    # Old way: delete this once the line above is confirmed sufficient
    return jsonapi.loads(content)
Example #48
 def on_message(self, msg):
     self.send(jsonapi.loads(msg))
Example #49
    def _run(self, receiver, frontend, sender):
        receiver_addr = auto_bind(receiver)
        frontend.connect(self.front_sink_addr)
        sender.bind('tcp://*:%d' % self.port)

        pending_checksum = defaultdict(int)
        pending_result = defaultdict(list)
        job_checksum = defaultdict(int)

        poller = zmq.Poller()
        poller.register(frontend, zmq.POLLIN)
        poller.register(receiver, zmq.POLLIN)

        # send worker receiver address back to frontend
        frontend.send(receiver_addr.encode('ascii'))

        # Windows does not support logger in MP environment, thus get a new logger
        # inside the process for better compatibility
        logger = set_logger(colored('SINK', 'green'), self.verbose)
        logger.info('ready')

        while not self.exit_flag.is_set():
            socks = dict(poller.poll())
            if socks.get(receiver) == zmq.POLLIN:
                msg = receiver.recv_multipart()
                job_id = msg[0]
                # parsing the ndarray
                arr_info, arr_val = jsonapi.loads(msg[1]), msg[2]
                X = np.frombuffer(memoryview(arr_val), dtype=arr_info['dtype'])
                X = X.reshape(arr_info['shape'])
                job_info = job_id.split(b'@')
                job_id = job_info[0]
                partial_id = job_info[1] if len(job_info) == 2 else 0
                pending_result[job_id].append((X, partial_id))
                pending_checksum[job_id] += X.shape[0]
                logger.info(
                    'collect job %s (%d/%d)' %
                    (job_id, pending_checksum[job_id], job_checksum[job_id]))

                # check if there are finished jobs, send it back to workers
                finished = [(k, v) for k, v in pending_result.items()
                            if pending_checksum[k] == job_checksum[k]]
                for job_info, tmp in finished:
                    logger.info('send back\tsize: %d\tjob id:%s\t' %
                                (job_checksum[job_info], job_info))
                    # re-sort to the original order
                    tmp = [x[0] for x in sorted(tmp, key=lambda x: int(x[1]))]
                    client_addr, req_id = job_info.split(b'#')
                    send_ndarray(sender, client_addr,
                                 np.concatenate(tmp, axis=0), req_id)
                    pending_result.pop(job_info)
                    pending_checksum.pop(job_info)
                    job_checksum.pop(job_info)

            if socks.get(frontend) == zmq.POLLIN:
                client_addr, msg_type, msg_info, req_id = frontend.recv_multipart()
                if msg_type == ServerCommand.new_job:
                    job_info = client_addr + b'#' + req_id
                    job_checksum[job_info] = int(msg_info)
                    logger.info('job register\tsize: %d\tjob id: %s' %
                                (int(msg_info), job_info))
                elif msg_type == ServerCommand.show_config:
                    # dirty fix of slow-joiner: sleep so that client receiver can connect
                    time.sleep(0.1)
                    logger.info('send config\tclient %s' % client_addr)
                    sender.send_multipart([client_addr, msg_info, req_id])
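The two-frame convention parsed above (JSON metadata, then raw buffer) implies a sender like the following sketch; the actual send_ndarray helper lives elsewhere in the project and may differ in details:

import numpy as np
from zmq.utils import jsonapi

def send_ndarray(src, dest, X, req_id=b'', flags=0):
    # Mirror of the parsing above: [dest, JSON({dtype, shape}), raw buffer, req_id].
    md = dict(dtype=str(X.dtype), shape=X.shape)
    return src.send_multipart([dest, jsonapi.dumps(md), X, req_id], flags, copy=False)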
Example #50
import pickle

from zmq.utils import jsonapi
# assumption: helpers below come from IPython's jsonutil, as in the original module
from IPython.utils.jsonutil import date_default, extract_dates


def squash_unicode(obj):
    """coerce unicode back to bytestrings."""
    if isinstance(obj, dict):
        for key in obj.keys():
            obj[key] = squash_unicode(obj[key])
            if isinstance(key, unicode):
                obj[squash_unicode(key)] = obj.pop(key)
    elif isinstance(obj, list):
        for i, v in enumerate(obj):
            obj[i] = squash_unicode(v)
    elif isinstance(obj, unicode):
        obj = obj.encode('utf8')
    return obj


#-----------------------------------------------------------------------------
# globals and defaults
#-----------------------------------------------------------------------------

key = 'on_unknown' if jsonapi.jsonmod.__name__ == 'jsonlib' else 'default'
json_packer = lambda obj: jsonapi.dumps(obj, **{key: date_default})
json_unpacker = lambda s: extract_dates(jsonapi.loads(s))
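Concretely, date_default turns datetime objects into ISO8601 strings during packing, and extract_dates converts matching strings back after unpacking; an illustrative round trip:

from datetime import datetime

msg = {'started': datetime(2014, 1, 1, 12, 30)}
wire = json_packer(msg)         # datetime serialized as an ISO8601 string
restored = json_unpacker(wire)  # ISO8601 string parsed back into a datetime
# For naive datetimes without microseconds the two should compare equal:
assert restored['started'] == msg['started']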

pickle_packer = lambda o: pickle.dumps(o, -1)
pickle_unpacker = pickle.loads

default_packer = json_packer
default_unpacker = json_unpacker

DELIM = b"<IDS|MSG>"

#-----------------------------------------------------------------------------
# Mixin tools for apps that use Sessions
#-----------------------------------------------------------------------------

session_aliases = dict(
    ident='Session.session',
    user='Session.username',
    keyfile='Session.keyfile',
)
Example #51
 def on_new_data(self, topic, headers, message, match):
     data = jsonapi.loads(message[0])
     self.data_queue.notify_all(data)
Example #52
import logging
import os
import Queue  # Python 2 stdlib queue; this snippet predates Python 3

from zmq.utils import jsonapi  # assumption: the same jsonapi used throughout these examples
from crate_historian import crate_utils
from volttron.platform.agent import utils
from volttron.platform.dbutils import mongoutils

logging.basicConfig(level=logging.DEBUG)
_log = logging.getLogger(__name__)

for key in logging.Logger.manager.loggerDict:
    _log.debug(key)

logging.getLogger('crate.client.http').setLevel(logging.INFO)
logging.getLogger('urllib3.connectionpool').setLevel(logging.INFO)

root = os.path.dirname(os.path.abspath(__file__))
with open('{}/crate_config'.format(root), 'r') as fp:
    crate_params = jsonapi.loads(fp.read())

root = os.path.dirname(os.path.abspath(__file__))
with open('{}/mongo_config'.format(root), 'r') as fp:
    mongo_params = jsonapi.loads(fp.read())
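Both config files are plain JSON parsed with jsonapi.loads into ordinary dicts; a tiny self-contained illustration (the keys are invented, not taken from the project):

from zmq.utils import jsonapi  # assumption: same jsonapi as above

sample = '{"host": "http://localhost:4200", "error_trace": true}'
params = jsonapi.loads(sample)
assert params['error_trace'] is True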

MAX_QUEUE_SIZE = 50000
QUEUE_BATCH_SIZE = 5000


class TableQueue(Queue.Queue, object):
    def __init__(self, table_name):
        super(TableQueue, self).__init__()
        self.table_name = table_name

Example #53
                        break
            if not should_parse_json:
                sys.stderr.write(
                    "Invalid json file detected, must start with { or [ character.\n"
                )
                sys.exit(1)

            # Yaml failed for some reason, could be invalid yaml or could
            # have embedded invalid character in a json file.  So now we
            # are going to try to deal with json here.

            tmpconfigfile = tempfile.NamedTemporaryFile()
            opts.config.seek(0)
            with open(tmpconfigfile.name, 'w') as fout:

                for line in opts.config:
                    line = line.partition('#')[0]
                    if line.rstrip():
                        fout.write(line.rstrip() + "\n")
            config_file = tmpconfigfile.name
            try:
                with open(tmpconfigfile.name) as f:
                    opts.config = jsonapi.loads(f.read())
            finally:
                tmpconfigfile.close()

    if opts.config:
        install_agent(opts, opts.package, opts.config)
    else:
        install_agent(opts, opts.package, {})
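Condensed, the stripping above turns a commented JSON file into something jsonapi.loads accepts; a self-contained illustration (the key name is invented):

from zmq.utils import jsonapi  # assumption: same jsonapi as the surrounding code

raw = '{\n    "agent_id": "listener"  # inline comment\n}\n'
stripped = '\n'.join(line.partition('#')[0].rstrip()
                     for line in raw.splitlines()
                     if line.partition('#')[0].rstrip())
jsonapi.loads(stripped)  # -> {'agent_id': 'listener'}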
Example #54
    def capture_data(self, peer, sender, bus, topic, headers, message, device):

        timestamp_string = headers.get(headers_mod.DATE)
        timestamp, my_tz = process_timestamp(timestamp_string)

        try:
            # 2.0 agents compatibility layer makes sender == pubsub.compat so
            # we can do the proper thing when it is here
            if sender == 'pubsub.compat':
                message = jsonapi.loads(message[0])

            if isinstance(message, dict):
                values = message
            else:
                values = message[0]

        except ValueError as e:
            _log.error(
                "message for {topic} bad message string: {message_string}".
                format(topic=topic, message_string=message[0]))
            return
        except IndexError as e:
            _log.error("message for {topic} missing message string".format(
                topic=topic))
            return
        except Exception as e:
            _log.exception(e)
            return

        meta = {}
        try:
            # 2.0 agents compatibility layer makes sender == pubsub.compat so
            # we can do the proper thing when it is here
            if sender == 'pubsub.compat':
                if isinstance(message[1], str):
                    meta = jsonapi.loads(message[1])

            if not isinstance(message, dict):
                meta = message[1]

        except ValueError as e:
            _log.warning(
                "meta data for {topic} bad message string: {message_string}".
                format(topic=topic, message_string=message[0]))
        except IndexError as e:
            _log.warning("meta data for {topic} missing message string".format(
                topic=topic))

        if topic.startswith('analysis'):
            source = 'analysis'
        else:
            source = 'scrape'
        _log.debug("Queuing {topic} from {source} for publish".format(
            topic=topic, source=source))

        for key, value in values.iteritems():
            point_topic = device + '/' + key
            self._event_queue.put({
                'source': source,
                'topic': point_topic,
                'readings': [(timestamp, value)],
                'meta': meta.get(key, {})
            })
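For context, the publishes handled above carry either a bare dict of point values or a two-element list of values plus per-point metadata; an illustrative payload (values invented):

message = [
    {'OutsideAirTemperature': 52.5, 'DamperPosition': 0.7},   # values
    {'OutsideAirTemperature': {'units': 'F', 'type': 'float'},
     'DamperPosition': {'units': '%', 'type': 'float'}},      # per-point meta
]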
Example #55
    def _run(self, receiver, frontend, sender):

        receiver_addr = auto_bind(receiver)
        frontend.connect(self.nav_to_sink_addr)
        sender.bind('tcp://*:%d' % self.port)

        poller = zmq.Poller()
        poller.register(frontend, zmq.POLLIN)
        poller.register(receiver, zmq.POLLIN)

        # send worker receiver address back to frontend
        frontend.send(receiver_addr.encode('ascii'))

        # Windows does not support logger in MP environment, thus get a new logger
        # inside the process for better compatibility
        logger = set_logger(colored('SINK', 'green'),
                            logger_dir=self.logdir,
                            verbose=self.verbose)
        logger.info('ready')
        self.is_ready.set()

        sink_status = ServerStatistic()

        while not self.exit_flag.is_set():
            try:
                socks = dict(poller.poll())

                if socks.get(receiver) == zmq.POLLIN:
                    client, req_id, msg, msg_info = recv_from_prev_raw(
                        receiver)
                    logger.info("collected {}#{}".format(client, req_id))

                    send_to_next_raw(client, req_id, msg, msg_info, sender)
                    self.current_jobnum -= 1
                    self.total_processed += 1
                    logger.info('send back\tjob id: {}#{} \tleft: {}'.format(
                        client, req_id, self.current_jobnum))

                if socks.get(frontend) == zmq.POLLIN:
                    client_addr, msg_type, msg_info, req_id = frontend.recv_multipart()
                    if msg_type == ServerCmd.new_job:
                        job_id = client_addr + b'#' + req_id
                        self.current_jobnum += 1
                        self.maximum_jobnum = max(self.maximum_jobnum, self.current_jobnum)
                        logger.info('registered job\tjob id: {}\tleft: {}'.format(
                            job_id, self.current_jobnum))

                    elif msg_type == ServerCmd.show_config:
                        # dirty fix of slow-joiner: sleep so that client receiver can connect
                        time.sleep(0.1)
                        logger.info('send config\tclient %s' % client_addr)
                        prev_status = jsonapi.loads(msg_info)
                        status = {
                            'statistic_postsink': {
                                'total_job_in_queue': self.current_jobnum,
                                'maximum_job_in_queue': self.maximum_jobnum,
                                'total_processed_job': self.total_processed,
                                'util': (self.current_jobnum / self.maximum_jobnum
                                         if self.maximum_jobnum > 0 else 0),
                                **sink_status.value,
                            }
                        }
                        send_to_next('obj', client_addr, req_id,
                                     {**prev_status, **status}, sender)

            except Exception as e:
                import traceback
                traceback.print_exc()
                tb = traceback.format_exc()
                logger.error('{}\n{}'.format(e, tb))
Example #56
    def handle_subsystem(self, frames):
        """
         Handler for incoming routing table frames. It calls appropriate action handler based on operation request.
        :param frames list of frames
        :type frames list
        :returns: response frame to be sent back to the sender
        :rtype: list

        :Return Values:
        response frame to be sent back to the sender
        """
        response = []
        result = False

        try:
            sender, recipient, proto, usr_id, msg_id, subsystem, op = frames[:7]
        except IndexError:
            return False
        subsystem = bytes(subsystem)
        op = bytes(op)
        # for f in frames:
        #     _log.debug("ROUTINGSERVICE handle subsystem {}".format(bytes(f)))

        if subsystem == b'routing_table':
            #If Setup mode of operation, setup authorization
            if op == b'setupmode_platform_connection':
                instance_config = bytes(frames[7])
                instance_config = jsonapi.loads(instance_config)
                self._setup_authorization(instance_config)
            # If Normal mode of operation, build authorized connection
            elif op == b'normalmode_platform_connection':
                instance_config = bytes(frames[7])
                instance_config = jsonapi.loads(instance_config)
                self._build_connection(instance_config)
                return False
            #Respond to Hello/Welcome messages from other instances
            elif op == b'hello':
                handshake_request = bytes(frames[7])
                try:
                    #Respond to 'hello' request with 'welcome'
                    if handshake_request == b'hello':
                        name = bytes(frames[8])
                        frames.pop(0)
                        _log.debug(
                            "HELLO Received hello, sending welcome to {}".format(name))
                        frames[6] = 'welcome'
                        frames[7] = self._my_instance_name
                        try:
                            _log.debug(
                                "Sending welcome message to sender {}".format(
                                    name))
                            self.send_external(name, frames)
                        except ZMQError as exc:
                            _log.error("ZMQ error: ")
                    #Respond to 'welcome' response by sending Pubsub subscription list
                    elif handshake_request == b'welcome':
                        name = bytes(frames[8])
                        _log.debug(
                            "HELLO Received welcome. Connection established with: {}"
                            .format(name))
                        try:
                            self._instances[name]['status'] = STATUS_CONNECTED
                            self._onconnect_pubsub_handler(name)
                        except KeyError as exc:
                            _log.error(
                                "Welcome message received from unknown platform: {}"
                                .format(name))
                except IndexError as exc:
                    _log.error(
                        "Insufficient frames in hello message {}".format(exc))
            elif op == b"web-addresses":
                self._web_addresses = bytes(frames[7])
                self._web_addresses = jsonapi.loads(self._web_addresses)
            #Update routing table entry
            elif op == b'update':
                result = self._update_entry(frames)
            elif op == b'request_response':
                pass
            else:
                _log.error("Unknown operation: {}".format(op))
        if result:
            #Form response frame
            response = [sender, recipient, proto, usr_id, msg_id, subsystem]
            response.append(zmq.Frame(b'request_response'))
            response.append(zmq.Frame(bytes(result)))
        else:
            response = False

        return response
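To make the frame indexing above concrete, an incoming hello message would look roughly like this (values invented for illustration):

frames = [
    b'platform-2',      # sender
    b'',                # recipient
    b'VIP1',            # proto
    b'',                # usr_id
    b'1',               # msg_id
    b'routing_table',   # subsystem
    b'hello',           # op
    b'hello',           # handshake request (frames[7])
    b'platform-2',      # instance name (frames[8])
]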
Example #57
 def on_message(self, msg):
     if len(msg) < self.max_msg_size:
         msg = jsonapi.loads(msg)
         self.session.send(self.shell_stream, msg)
Example #58
 def deserialze_data(self, data):
     return jsonapi.loads(data, object_hook=utils.json_numpy_obj_hook)
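utils.json_numpy_obj_hook is not shown in this snippet; a common shape for such a hook, given here as a hedged sketch rather than the project's actual implementation, decodes base64-packed arrays back into numpy:

import base64
import numpy as np

def json_numpy_obj_hook(dct):
    # Undo a matching encoder that stored arrays as
    # {'__ndarray__': <base64 data>, 'dtype': ..., 'shape': ...}.
    if isinstance(dct, dict) and '__ndarray__' in dct:
        data = base64.b64decode(dct['__ndarray__'])
        return np.frombuffer(data, dct['dtype']).reshape(dct['shape'])
    return dct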
Example #59
import json
import logging

def decodeR3Message(multipart_msg):
    try:
        return (multipart_msg[0], json.loads(multipart_msg[1]))
    except Exception as e:
        logging.debug("decodeR3Message: " + str(e))
        return ("", {})
Example #60
 def _recv_ndarray(self, wait_for_req_id=None):
     request_id, response = self._recv(wait_for_req_id)
     arr_info, arr_val = jsonapi.loads(response[1]), response[2]
     X = np.frombuffer(_buffer(arr_val), dtype=str(arr_info['dtype']))
     return Response(request_id,
                     self.formatter(X.reshape(arr_info['shape'])),
                     arr_info.get('tokens', ''))