Example #1
    def update_status(self, status, context=None):
        """
        Updates the internal state of the `Status` object.

        Raises a ValueError if the context is not JSON serializable or
        if the status parameter is not within the ACCEPTABLE_STATUS tuple.

        :param status: new status value; must be one of ACCEPTABLE_STATUS.
        :param context: optional context for the status; must be JSON
            serializable.
        :return: None
        """
        if status not in ACCEPTABLE_STATUS:
            raise ValueError('Invalid status value {}'.format(status))
        try:
            jsonapi.dumps(context)
        except TypeError:
            raise ValueError('Context must be JSON serializable.')

        status_changed = status != self._status
        self._status = status
        self._context = context
        self._last_updated = format_timestamp(get_aware_utc_now())

        if status_changed and self._status_changed_callback:
            print(self._status_changed_callback())
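
A brief usage sketch for update_status() above; the Status() constructor and the "GOOD" literal are assumptions, only the validation behaviour shown in the code is taken from the example:

    status = Status()                                      # hypothetical constructor
    status.update_status("GOOD", {"message": "healthy"})   # fine if "GOOD" is in ACCEPTABLE_STATUS
    status.update_status("GOOD", {"sock": object()})       # raises ValueError: context not JSON serializable
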
Example #2
    def insert_aggregate(self, agg_topic_id, agg_type, period, ts,
                         data, topic_ids):
        """
        Insert aggregate data collected for a specific time period into the
        database. Data is inserted into the <agg_type>_<period> table.

        :param agg_topic_id: aggregate topic id
        :param agg_type: type of aggregation
        :param period: time period of aggregation
        :param ts: end time of aggregation period (not inclusive)
        :param data: computed aggregate
        :param topic_ids: topic id or list of topic ids for which the
                          aggregate was computed
        :return: True if execution was successful, False otherwise
        """

        if not self.__connect():
            print("connect to database failed.......")
            return False
        table_name = agg_type + '_' + period
        _log.debug("Inserting aggregate: {} {} {} {} into table {}".format(
            ts, agg_topic_id, jsonapi.dumps(data), str(topic_ids), table_name))
        self.__cursor.execute(
            self.insert_aggregate_stmt(table_name),
            (ts, agg_topic_id, jsonapi.dumps(data), str(topic_ids)))
        self.commit()
        return True
Example #3
    def capture_analysis_data(self, peer, sender, bus, topic, headers, message):
        '''Capture device data and submit it to be published by a historian.

        Topics ending in "/all" are skipped; only individual point topics are
        forwarded to the historian.
        '''
        
        if topic.endswith("/all") or '/all/' in topic:
#             _log.debug("Unmatched topic: {}".format(topic))
            return
        
        # Because of the above if we know that all is in the topic so
        # we strip it off to get the base device
#         parts = topic.split('/')
        parts = topic.split('/')
        device = '/'.join(parts[1:-1]) #'/'.join(reversed(parts[2:]))
        
        _log.debug("found topic {}".format(topic))
        
        if topic.endswith('Timestamp'):
            pass
        
        try:
            value = float(message[0])
        except (ValueError, TypeError):
            value = message[0]
        # Because message is a single point we need to have the point in the
        # message
        real_message = {parts[-1]: value}
        real_message = [jsonapi.dumps(real_message), jsonapi.dumps({})]
        
        self.capture_data(peer, sender, bus, topic, headers, real_message, device)
Example #4
	def send(self, envelope, msg, error=False):
		if error:
			msg = jsonapi.dumps({'error': msg})
		else:
			# FIXME: exception handling could be more precise, but the many
			# JSON libraries in use raise different error types.
			try: msg = jsonapi.dumps({'result': msg})
			except Exception:
				msg = jsonapi.dumps({'proxy': repr(msg)})

		envelope.append(msg)
		return self.sock.send_multipart(envelope)
Example #5
 def process_multiframe_response(self, env, multipart_message):
     timestamp, command, args, kwargs, result, error_str, error = map(
             self.deserialize_frame, multipart_message
     )
     data = {'timestamp': timestamp, 'command': command, 'args': args,
             'kwargs': kwargs, 'result': result, 'error_str': error_str,
             'error': error}
     data = self._filter_multiframe_response_fields(data)
     try:
         json_data = jsonapi.dumps(data)
     except Exception:
         import traceback
         data = {'result': None, 'error_str': traceback.format_exc()}
         json_data = jsonapi.dumps(data)
Example #6
    def call(self, cmd):
        if isinstance(cmd, string_types):
            raise DeprecationWarning('call() takes a mapping')

        call_id = uuid.uuid4().hex
        cmd['id'] = call_id
        try:
            cmd = json.dumps(cmd)
        except ValueError as e:
            raise CallError(str(e))

        try:
            yield tornado.gen.Task(self.stream.send, cmd)
        except zmq.ZMQError as e:
            raise CallError(str(e))

        while True:
            messages = yield tornado.gen.Task(self.stream.on_recv)
            for message in messages:
                try:
                    res = json.loads(message)
                    if res.get('id') != call_id:
                        # we got the wrong message
                        continue
                    raise tornado.gen.Return(res)
                except ValueError as e:
                    raise CallError(str(e))
Example #7
    def _set_override_off(self, pattern):
        pattern = pattern.lower()
        # If pattern exactly matches
        if pattern in self._override_patterns:
            self._override_patterns.discard(pattern)
            # Cancel any pending override events
            self._cancel_override_events(pattern)
            self._override_devices.clear()
            patterns = dict()
            # Build override devices list again
            for pat in self._override_patterns:
                for device in self.instances:
                    device = device.lower()
                    if fnmatch.fnmatch(device, pat):
                        self._override_devices.add(device)

                if self._override_interval_events[pat] is None:
                    patterns[pat] = str(0.0)
                else:
                    evt, end_time = self._override_interval_events[pat]
                    patterns[pat] = utils.format_timestamp(end_time)

            self.vip.config.set("override_patterns", jsonapi.dumps(patterns))
        else:
            _log.error("Override Pattern did not match!")
            raise OverrideError(
                "Pattern {} does not exist in list of override patterns".format(pattern))
Example #8
def jsonify(item):
	"""
	Serializes an object into *optimized* JSON (meaning no whitespace is used).

	"""

	return jsonapi.dumps(item, separators=(",", ":"))
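
For reference, the compact separators are what remove the whitespace; a small sketch with the standard library json (zmq's jsonapi typically delegates to it for plain dicts) shows the difference:

    import json

    item = {"a": 1, "b": [1, 2, 3]}
    print(json.dumps(item))                         # {"a": 1, "b": [1, 2, 3]}
    print(json.dumps(item, separators=(",", ":")))  # {"a":1,"b":[1,2,3]}
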
Example #9
    def handle_subsystem(self, frames, user_id):
        subsystem = bytes(frames[5])
        if subsystem == b'quit':
            sender = bytes(frames[0])
            if sender == b'control' and user_id == self.default_user_id:
                raise KeyboardInterrupt()
        elif subsystem == b'query':
            try:
                name = bytes(frames[6])
            except IndexError:
                value = None
            else:
                if name == b'addresses':
                    _log.debug("ADDRESS {}".format(self.addresses))
                    # #140
#                     value = [addr.base for addr in self.addresses]
                    if self.addresses:
                        value = self.addresses
                    else:
                        value = [self.local_address]
                else:
                    value = None
            frames[6:] = [b'', jsonapi.dumps(value)]
            frames[3] = b''
            return frames
Example #10
 def registerRequest(self, kc, msg):
     name = None
     if "mode" not in msg["content"] or msg["content"]["mode"] in ("sage", "python"):
         self.waiting[msg["header"]["msg_id"]] = kc
         self.session.send(self.stream, msg)
         return
     elif msg["content"]["mode"] in trait_names:
         line = msg["content"]["line"][:msg["content"]["cursor_pos"]]
         name = Completer.name_pattern.search(line)
     response = {
         "header": {
             "msg_id": str(uuid.uuid4()),
             "username": "",
             "session": self.kernel_id,
             "msg_type": "complete_reply"
         },
         "parent_header": msg["header"],
         "metadata": {}
     }
     if name is not None:
         response["content"] = {
             "matches": [t for t in trait_names[msg["content"]["mode"]] if t.startswith(name.group())],
             "matched_text": name.group()
         }
     else:
         response["content"] = {
             "matches": [],
             "matched_text": []
         }
     kc.send("complete/shell," + jsonapi.dumps(response))
Example #11
def test_store_list_get_configuration(vc_vcp_platforms):
    vc, vcp = vc_vcp_platforms

    data = dict(
        bim=50,
        baz="foo",
        bar="lambda"
    )
    str_data = jsonapi.dumps(data)
    identity = "foo.bar"
    config_name = "fuzzywidgets"
    api = APITester(vc.jsonrpc_endpoint)

    platforms = api.list_platforms()
    platform_uuid = platforms[0]["uuid"]

    resp = api.store_agent_config(platform_uuid, identity, config_name,
                                  str_data)
    assert resp is None

    resp = api.list_agent_configs(platform_uuid, identity)
    assert config_name == resp[0]

    resp = api.get_agent_config(platform_uuid, identity, config_name)
    assert str_data == resp
Example #12
 def get(self):
     nbm = self.application.notebook_manager
     km = self.application.kernel_manager
     files = nbm.list_notebooks()
     for f in files:
         f['kernel_id'] = km.kernel_for_notebook(f['notebook_id'])
     self.finish(jsonapi.dumps(files))
Example #13
 def registerRequest(self, kc, msg):
     mode = msg["content"].get("mode", "sage")
     if mode in ("sage", "python"):
         self.waiting[msg["header"]["msg_id"]] = kc
         self.session.send(self.stream, msg)
         return
     match = Completer.name_pattern.search(
         msg["content"]["line"][:msg["content"]["cursor_pos"]])
     response = {
         "channel": "shell",
         "header": {
             "msg_id": str(uuid.uuid4()),
             "username": "",
             "session": self.kernel_id,
             "msg_type": "complete_reply"
         },
         "parent_header": msg["header"],
         "metadata": {},
         "content": {
             "matches": [t for t in tab_completion.get(mode, [])
                         if t.startswith(match.group())],
             "cursor_start": match.start(),
         },
     }
     kc.send("complete," + jsonapi.dumps(response))
Example #14
 def create_message_and_publish(self, topic, message):
     headers = {
         headers_mod.FROM: AGENT_ID,
         headers_mod.CONTENT_TYPE: headers_mod.CONTENT_TYPE.JSON,
         headers_mod.DATE: datetime.datetime.today().isoformat(),
     }
     self.vip.pubsub.publish("pubsub", topic, headers, jsonapi.dumps(message))
Example #15
def do_rpc(method, params=None, auth_token=None, rpc_root=None):
    """ A utility method for calling json rpc based funnctions.

    :param method: The method to call
    :param params: the parameters to the method
    :param auth_token: A token if the user has one.
    :param rpc_root: Root of jsonrpc api.
    :return: The result of the rpc method.
    """

    assert rpc_root, "Must pass a jsonrpc url in to the function."

    json_package = {
        'jsonrpc': '2.0',
        'id': '2503402',
        'method': method,
    }

    if auth_token:
        json_package['authorization'] = auth_token

    if params:
        json_package['params'] = params

    data = jsonapi.dumps(json_package)

    return requests.post(rpc_root, data=data)
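
A hedged call sketch for do_rpc(); the method name, token and URL below are made up for illustration, and reading the 'result' key assumes the endpoint returns a standard JSON-RPC 2.0 response body:

    response = do_rpc(method='list_platforms',
                      params=None,
                      auth_token='0123-example-token',            # hypothetical token
                      rpc_root='http://localhost:8080/jsonrpc')   # hypothetical endpoint
    result = response.json().get('result')
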
Example #16
 def post(self):
     km = self.application.kernel_manager
     notebook_id = self.get_argument('notebook', default=None)
     kernel_id = km.start_kernel(notebook_id)
     data = {'ws_url':self.ws_url,'kernel_id':kernel_id}
     self.set_header('Location', '/'+kernel_id)
     self.finish(jsonapi.dumps(data))
Example #17
    def _on_platform_message(self,peer, sender, bus, topic, headers, message):
        """
        Callback function for vcp agent to publish to.

        Platforms that are being managed should publish to this topic with
        the agent_list and other interesting things that volttron
        central should want to know.
        """
        self._log.debug('ON PLATFORM MESSAGE!')
        expected_prefix = "platforms/{}/".format(self.vip_identity)

        if not topic.startswith(expected_prefix):
            self._log.warn(
                "Unexpected topic published to stats function: {}".format(
                    topic
                ))
            return

        self._log.debug("TOPIC WAS: {}".format(topic))
        self._log.debug("MESSAGE WAS: {}".format(message))
        self._log.debug("Expected topic: {}".format(expected_prefix))
        self._log.debug(
            "Are Equal: {}".format(topic.startswith(expected_prefix)))
        self._log.debug("topic type: {} prefix_type: {}".format(type(topic),
                                                                type(
                                                                    expected_prefix)))

        # Pull off the "real" topic from the prefix
        # topic = topic[len(expected_prefix):]

        topicsplit = topic.split('/')
        if len(topicsplit) < 2:
            self._log.error('Invalid topic length published to volttron central')
            return

        # Topic is platforms/<platform_uuid>/otherdata
        topicsplit = topic.split('/')

        if len(topicsplit) < 3:
            self._log.warn("Invalid topic length no operation or datatype.")
            self._log.warn("Topic was {}".format(topic))
            return

        _, platform_uuid, op_or_datatype, other = (
            topicsplit[0], topicsplit[1], topicsplit[2], topicsplit[3:])

        if op_or_datatype in ('iam', 'configure'):
            if not other:
                self._log.error("Invalid response to iam or configure endpoint")
                self._log.error(
                    "the sesson token was not included in response from vcp.")
                return

            ws_endpoint = "/vc/ws/{}/{}".format(other[0], op_or_datatype)
            self._log.debug('SENDING MESSAGE TO {}'.format(ws_endpoint))
            self._vc.vip.web.send(ws_endpoint, jsonapi.dumps(message))
        else:
            self._log.debug("OP WAS: {}".format(op_or_datatype))
Example #18
    def _backup_new_to_publish(self, new_publish_list):
        _log.debug("Backing up unpublished values.")
        c = self._connection.cursor()

        for item in new_publish_list:
            source = item['source']
            topic = item['topic']
            meta = item.get('meta', {})
            values = item['readings']

            topic_id = self._backup_cache.get(topic)

            if topic_id is None:
                c.execute('''INSERT INTO topics values (?,?)''', (None, topic))
                c.execute('''SELECT last_insert_rowid()''')
                row = c.fetchone()
                topic_id = row[0]
                self._backup_cache[topic_id] = topic
                self._backup_cache[topic] = topic_id

            for name, value in meta.iteritems():
                c.execute('''INSERT OR REPLACE INTO metadata values(?, ?, ?, ?)''',
                            (source,topic_id,name,value))
                self._meta_data[(source,topic_id)][name] = value


            
            for timestamp, value in values:
                c.execute('''INSERT OR REPLACE INTO outstanding values(NULL, ?, ?, ?, ?)''',
                          (timestamp,source,topic_id,jsonapi.dumps(value)))

        self._connection.commit()
Example #19
def groupofatoms(groupofatoms):
    '''
    Serialises an MMTK group of atoms (e.g. a protein) as PDB inside a JSON object.
    '''
    # Prepare the data and configuration.
    config = None
    universe = groupofatoms.universe()
    if universe is not None:
        config = universe.contiguousObjectConfiguration([groupofatoms])

    # Serialise the data.
    buffer = StringIO()
    file = PDBOutputFile(buffer)
    file.write(groupofatoms, config)

    # Retrieve the content.
    pdb = buffer.getvalue()
    file.close()

    # Store it in the json object that is sent to javascript.
    result = {'pdb': pdb}

    # Specify the javascript handler.
    result['handler'] = 'GroupOfAtoms'

    return jsonapi.dumps(result)
Example #20
 def start_dr_event(self):
     self.state = 'DR_EVENT'
     self.publish(topics.ACTUATOR_SET(point=volttron_flag, **rtu_path), self.headers, str(3.0))
     self.publish(topics.ACTUATOR_SET(point=cooling_stpt, **rtu_path), self.headers, str(self.csp_cpp))
     
     new_fan_speed = self.normal_firststage_fanspeed - (self.normal_firststage_fanspeed*fan_reduction)
     new_fan_speed = max(new_fan_speed,0)
     self.publish(topics.ACTUATOR_SET(point=cooling_fan_sp1, **rtu_path), self.headers, str(new_fan_speed))
     
     new_fan_speed = self.normal_secondstage_fanspeed - (self.normal_firststage_fanspeed*fan_reduction)
     new_fan_speed = max(new_fan_speed,0)
     self.publish(topics.ACTUATOR_SET(point=cooling_fan_sp2, **rtu_path), self.headers, str(new_fan_speed))            
     
     self.publish(topics.ACTUATOR_SET(point=min_damper_stpt, **rtu_path), self.headers, str(damper_cpp))
     self.publish(topics.ACTUATOR_SET(point=cooling_stage_diff, **rtu_path), self.headers, str(cooling_stage_differential))
     mytime = int(time.time())
     content = {
         "Demand Response Event": {
              "Readings": [[mytime, 1.0]],
              "Units": "TU",
              "data_type": "double"
          }
     }
     self.publish(self.smap_path, self.headers, jsonapi.dumps(content))    
     def backup_run():
         self.start_dr_event()
         self.error_handler = None
         
     self.error_handler = backup_run
Example #21
    def __init__(self, init_state, host):
        self.state = init_state
        
        context = zmq.Context()
        
        self.publisher = context.socket(zmq.PUB)
        self.publisher.bind('tcp://*:{}'.format(IO.STATE))
        
        self.event = context.socket(zmq.PUB)
        self.event.bind('tcp://*:{}'.format(IO.EVENT))

        snapshot = context.socket(zmq.ROUTER)
        snapshot.bind('tcp://*:{}'.format(IO.SNAPSHOT))

        self.association = context.socket(zmq.REQ)
        self.association.connect('tcp://{}:{}'.format(host, IO.ASSOCIATION))

        incoming = context.socket(zmq.PULL)
        incoming.bind('tcp://*:{}'.format(IO.EXTERNAL))

        poller = zmq.Poller()
        poller.register(incoming, zmq.POLLIN)
        poller.register(snapshot, zmq.POLLIN)

        while True:
            events = dict(poller.poll())
            
            if incoming in events:
                self.parse(incoming.recv_json())
                 
            if snapshot in events:
                address, _, message = snapshot.recv_multipart()
                snapshot.send_multipart([ address, 
                                          b'',
                                          dumps(self.state) ])
Example #22
 def on_recv(self, msg):
     msg = self.session.feed_identities(msg)[1]
     msg = self.session.unserialize(msg)
     msg_id = msg["parent_header"]["msg_id"]
     kc = self.waiting.pop(msg_id)
     del msg["header"]["date"]
     kc.send("complete/shell," + jsonapi.dumps(msg))
Example #23
        def write_status(self):
            historian_present = False

            try:
                ping = self.vip.ping('platform.historian', 'awake?').get(timeout=2)
                historian_present = True
            except Unreachable:
                _log.warning('platform.historian unavailable; no logging of data will occur.')
                return
            _log.debug('publishing data')
            base_topic = 'datalogger/log/platform/status'
            cpu = base_topic + '/cpu'
            virtual_memory = base_topic + "/virtual_memory"
            disk_partitions = base_topic + "/disk_partitions"

            points = {}

            for k, v in psutil.cpu_times_percent().__dict__.items():
                points['times_percent/' + k] = {'Readings': v,
                                                'Units': 'double'}

            points['percent'] = {'Readings': psutil.cpu_percent(),
                                 'Units': 'double'}

            message = jsonapi.dumps(points)
            self.vip.pubsub.publish(peer='pubsub',
                                    topic=cpu,
                                    message=points)
Example #24
    def routerRecv(self, message):
        """
        message = [ ... , request, image/blank]

        request = {'timestamp': timestamp, 
                   'task': 'detection'/'recognition'/'tracking',
                   'parameters': (...)}
        """
        request = loads(message[-2])

        if request["task"] == "detection":
            self.logger.debug("start detection")
            with open("image.jpg", "wb") as f:
                f.write(message[-1])
            sleep = random.randint(1, 2)  # detection
            time.sleep(sleep)
            message[-2] = dumps("detection")
            message[-1] = ""
            self.rtr.send_multipart(message)

        elif request["task"] == "tracking":
            self.logger.debug("prepare to tracking")
            message[-1] = "finish"
            tracker.Tracker(self.rtr, message)
        else:
            self.logger.debug("requested task is not supported")
Example #25
 def registerRequest(self, addr, msg):
     content = msg["content"]
     mode = content.get("mode", "sage")
     if mode in ("sage", "python"):
         self.waiting[msg["header"]["msg_id"]] = addr
         if self.kernel is None:
             # It is highly unlikely that we get a completion request before
             # the kernel is ready, so we are not going to handle it.
             logger.exception("completer kernel is not available")
         self.kernel.session.send(self.kernel.channels["shell"], msg)
         return
     match = Completer.name_pattern.search(
         content["line"][:content["cursor_pos"]])
     response = {
         "channel": "shell",
         "header": {
             "msg_id": str(uuid.uuid4()),
             "username": "",
             "session": self.kernel.id,
             "msg_type": "complete_reply"
         },
         "parent_header": msg["header"],
         "metadata": {},
         "content": {
             "matches": [t for t in tab_completion.get(mode, [])
                         if t.startswith(match.group())],
             "cursor_start": match.start(),
         },
     }
     addr.send("complete," + jsonapi.dumps(response))
Example #26
 def send(self, socket):
     """Send key-value message to socket; any empty frames are sent as such."""
     key = "" if self.key is None else self.key
     seq_s = struct.pack("!q", self.sequence)
     body = "" if self.body is None else self.body
     prop_s = json.dumps(self.properties)
     socket.send_multipart([key, seq_s, self.uuid, prop_s, body])
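
A matching receive-side sketch, assuming the same five-frame layout and the "!q" sequence packing used in send() above; the function name is illustrative:

    import json
    import struct

    def recv_kvmsg(socket):
        key, seq_s, uuid_, prop_s, body = socket.recv_multipart()
        sequence = struct.unpack("!q", seq_s)[0]   # mirrors struct.pack("!q", ...) above
        properties = json.loads(prop_s)
        return key, sequence, uuid_, properties, body
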
Example #27
 def configure_plain(self, domain='*', passwords=None):
     '''
     Configure PLAIN authentication for a given domain. PLAIN authentication
     uses a plain-text password file. To cover all domains, use "*".
     You can modify the password file at any time; it is reloaded automatically.
     '''
     self.pipe.send_multipart([b'PLAIN', b(domain, self.encoding), jsonapi.dumps(passwords or {})])
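
Typical use, assuming an authenticator instance exposing this method (as pyzmq's ThreadAuthenticator does); the credentials below are placeholders:

    # 'auth' is a hypothetical authenticator instance; '*' covers all domains.
    auth.configure_plain(domain='*', passwords={'admin': 'secret'})
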
Example #28
 def publish_to_smap(self, smap_identifier, afdd_msg, smap_energyid, energy_impact):
     '''
     Push diagnostic results and energy
     impact to sMAP historian
     '''
     self._log.debug(''.join(['Push to sMAP - ', smap_identifier, str(afdd_msg),
                              ' Energy Impact: ', str(energy_impact)]))
     mytime = int(time.time())
     if smap_energyid is not None:
         content = {
             smap_identifier: {
                 "Readings": [[mytime, afdd_msg]],
                 "Units": "TU",
                 "data_type": "double"
             },
             smap_energyid: {
                 "Readings": [[mytime, energy_impact]],
                 "Units": "kWh",
                 "data_type": "double"
             }
         }
     else:
         content = {
             smap_identifier: {
                 "Readings": [[mytime, afdd_msg]],
                 "Units": "TU",
                 "data_type": "double"
             }
         }
     self._agent.publish(self.smap_path, self.headers, jsonapi.dumps(content))
Example #29
 def get(self):
     nbm = self.notebook_manager
     km = self.kernel_manager
     files = nbm.list_notebooks()
     for f in files:
         f["kernel_id"] = km.kernel_for_notebook(f["notebook_id"])
     self.finish(jsonapi.dumps(files))
Example #30
    def _set_override_off(self, pattern):
        """Turn off override condition on all devices matching the pattern. It removes the pattern from the override
        patterns set, clears the list of overriden devices  and reevaluates the state of devices. It then cancels the
        pending override event and removes pattern from the config store.
        :param pattern: Override pattern to be removed.
        :type pattern: str
        """

        pattern = pattern.lower()

        # If pattern exactly matches
        if pattern in self._override_patterns:
            self._override_patterns.discard(pattern)
            # Cancel any pending override events
            self._cancel_override_events(pattern)
            self._override_devices.clear()
            patterns = dict()
            # Build override devices list again
            for pat in self._override_patterns:
                for device in self.instances:
                    device = device.lower()
                    if fnmatch.fnmatch(device, pat):
                        self._override_devices.add(device)

                if self._override_interval_events[pat] is None:
                    patterns[pat] = str(0.0)
                else:
                    evt, end_time = self._override_interval_events[pat]
                    patterns[pat] = utils.format_timestamp(end_time)

            self.vip.config.set("override_patterns", jsonapi.dumps(patterns))
        else:
            _log.error("Override Pattern did not match!")
            raise OverrideError(
                "Pattern {} does not exist in list of override patterns".format(pattern))
Example #31
    def _run(self, _, frontend, sink, *backend_socks):
        def push_new_job(client, req_id, msg_raw, msg_info_raw):
            _sock = rand_backend_socket
            send_to_next_raw(client, req_id, msg_raw, msg_info_raw, _sock)

        # bind all sockets
        self.logger.info('bind all sockets')
        frontend.bind('tcp://*:%d' % self.port)
        addr_front2sink = auto_bind(sink)

        addr_backend_post_list = [auto_bind(b) for b in backend_socks]
        self.logger.info('open %d worker sockets' %
                         len(addr_backend_post_list))

        # start the sink process
        self.logger.info('start the sink')
        proc_postsink = WKRSink(self.args, addr_front2sink,
                                addr_backend_post_list)
        self.processes.append(proc_postsink)
        proc_postsink.start()
        addr_sink = sink.recv().decode('ascii')

        # start the post-backend processes
        # WaveWorker: self, id, args, worker_address_list, sink_address, device_id
        self.logger.info('start main-workers')
        device_map_main_worker = self._get_device_map(self.num_worker,
                                                      self.device_map,
                                                      self.gpu_memory_fraction,
                                                      run_all_cpu=self.all_cpu)
        for idx, device_id in enumerate(device_map_main_worker):
            process = self.hardprocessor_skeleton(idx, self.args,
                                                  addr_backend_post_list,
                                                  addr_sink, device_id)
            self.processes.append(process)
            process.start()
            # process.is_ready.wait() # start model sequencely

        # start the http-service process
        if self.args.http_port:
            self.logger.info('start http proxy')
            proc_proxy = BertHTTPProxy(self.args)
            self.processes.append(proc_proxy)
            proc_proxy.start()

        rand_backend_socket = None
        server_status = ServerStatistic()

        for p in self.processes:
            p.is_ready.wait()

        self.is_ready.set()
        self.logger.info('all set, ready to serve request!')

        while True:
            try:
                request = frontend.recv_multipart()
                client, req_id, msg, msg_info = request
                # client, req_id, msg, msg_info = recv_from_prev(self.transfer_protocol, frontend)
                # request = [client, msg, req_id, msg_info]
            except (ValueError, AssertionError):
                self.logger.error(
                    'received a wrongly-formatted request (expected 4 frames, got %d)'
                    % len(request))
                self.logger.error('\n'.join('field %d: %s' % (idx, k)
                                            for idx, k in enumerate(request)),
                                  exc_info=True)
            else:
                server_status.update(request)
                if msg == ServerCmd.terminate:
                    break
                elif msg == ServerCmd.show_config:
                    self.logger.info(
                        'new config request\treq id: %d\tclient: %s' %
                        (int(req_id), client))
                    status_runtime = {
                        'client': client.decode('ascii'),
                        'num_process': len(self.processes),
                        'navigator -> worker': addr_backend_post_list,
                        'worker -> sink': addr_sink,
                        'server_current_time': str(datetime.now()),
                        'statistic': server_status.value,
                        'main_device_map': device_map_main_worker,
                        'main_batch_size': self.batch_size,
                        'protocol': self.transfer_protocol,
                        'num_concurrent_socket': self.total_concurrent_socket
                    }
                    sink.send_multipart([
                        client, msg,
                        jsonapi.dumps({
                            **status_runtime,
                            **self.status_args,
                            **self.status_static
                        }), req_id
                    ])
                else:
                    self.logger.info(
                        'new encode request\treq id: %s\tclient: %s' %
                        (str(req_id), client))

                    # register job
                    sink.send_multipart([
                        client, ServerCmd.new_job,
                        jsonapi.dumps({
                            'job_parts': '1',
                            'split_info': {}
                        }),
                        to_bytes(req_id)
                    ])

                    # pick random socket
                    rand_backend_socket = random.choice(
                        [b for b in backend_socks if b != rand_backend_socket])

                    # info = jsonapi.loads(msg_info)
                    # if self.transfer_protocol == 'obj':
                    #     msg = decode_object(msg, info)
                    # else:
                    #     msg = decode_ndarray(msg, info)

                    # push job
                    push_new_job(client, req_id, msg, msg_info)

        for p in self.processes:
            p.close()

        self.logger.info('terminated!')
Example #32
    def process_zmq_message(self, msg):
        #print('==== msg: %s' % str(msg))

        wcoll = cp.cgwmaincollection
        wctrl = cp.cgwmaintabuser if cp.cgwmaintabuser is not None else\
                cp.cgwmaincontrol

        try :
            for rec in msg :
                jo = json.loads(rec)
                #  jo['header'] # {'key': 'status', 'msg_id': '0918505109-317821000', 'sender_id': None}
                #  jo['body']   # {'state': 'allocated', 'transition': 'alloc'}

                if  jo['header']['key'] == 'status' :
                    body = jo['body']
                    cp.s_transition = body['transition']
                    cp.s_state      = body['state']
                    cp.s_cfgtype    = body['config_alias'] # BEAM/NO BEAM
                    cp.s_recording  = body['recording']    # True/False
                    cp.s_platform   = body.get('platform', None) # dict

                    #====
                    if wctrl is not None : wctrl.set_but_ctrls()
                    if wctrl is not None : wctrl.update_progress_bar(0, is_visible=False)
                    self.wconf.set_config_type(cp.s_cfgtype)
                    if wcoll is not None : wcoll.update_table()
                    logger.info('zmq msg transition:%s state:%s config:%s recording:%s'%\
                                (cp.s_transition, cp.s_state, cp.s_cfgtype, cp.s_recording))

                elif jo['header']['key'] == 'error' :
                    body = jo['body']
                    logger.error('received error msg: %s' % body['err_info'])
                    if wctrl is not None : wctrl.update_progress_bar(0, is_visible=False)
                    if wctrl is not None : wctrl.set_but_ctrls()

                    ## grab status directly (not from error message)
                    #status = daq_control_get_status()
                    #if status is None :
                    #    logger.warning('process_zmq_message on error: STATUS IS NOT AVAILABLE')
                    #    return

                    #transition, state, cfgtype, recording = status
                    #self.wconf.set_config_type(cfgtype)
                    #if wcoll is not None : wcoll.update_table()

                elif jo['header']['key'] == 'progress' :
                    body = jo['body']
                    logger.debug('received progress msg: %s' % str(body))
                    if wctrl is not None : 
                        v = 100*body['elapsed'] / body['total']
                        wctrl.update_progress_bar(v, is_visible=True, trans_name=body['transition'])

                else :
                    sj = json.dumps(jo, indent=2, sort_keys=False)
                    logger.debug('received json:\n%s' % sj)
                    if wctrl is not None : wctrl.update_progress_bar(0, is_visible=False)

        except KeyError as ex:
             logger.warning('CGWMain.process_zmq_message: %s\nError: %s' % (str(msg),ex))

        except Exception as ex:
             logger.warning('CGWMain.process_zmq_message: %s\nError: %s' % (str(msg),ex))
Example #33
    def post(self):
        if 'Origin' in self.request.headers:
            self.set_header('Access-Control-Allow-Origin',
                            self.request.headers['Origin'])
            self.set_header('Access-Control-Allow-Credentials', 'true')
        if (config.get_config('requires_tos')
                and self.get_argument('accepted_tos', 'false') != 'true'):
            self.set_status(403)
            self.finish(
                'When evaluating code, you must acknowledge your acceptance '
                'of the terms of service at /static/tos.html by passing the '
                'parameter accepted_tos=true\n')
            return
        code = ''.join(self.get_arguments('code', strip=False))
        if len(code) > 65000:
            self.set_status(413)
            self.finish('Max code size is 65000 characters')
            return
        remote_ip = self.request.remote_ip
        referer = self.request.headers.get('Referer', '')
        self.kernel_id = yield tornado.gen.Task(
            self.application.km.new_session_async,
            referer=referer,
            remote_ip=remote_ip,
            timeout=0)
        sm = StatsMessage(kernel_id=self.kernel_id,
                          remote_ip=remote_ip,
                          referer=referer,
                          code=code,
                          execute_type='service')
        if remote_ip == '127.0.0.1' and self.kernel_id:
            stats_logger.debug(sm)
        else:
            stats_logger.info(sm)
        if not self.kernel_id:
            logger.error('could not obtain a valid kernel_id')
            self.set_status(503)
            self.finish()
            return
        self.zmq_handler = ZMQServiceHandler()
        self.zmq_handler.open(self.application, self.kernel_id)
        loop = tornado.ioloop.IOLoop.instance()

        def kernel_callback(msg):
            if msg['msg_type'] == 'execute_reply':
                loop.remove_timeout(self.timeout_handle)
                logger.debug('service request finished for %s', self.kernel_id)
                streams = self.zmq_handler.streams
                streams['success'] = msg['content']['status'] == 'ok'
                streams['execute_reply'] = msg['content']
                loop.add_callback(self.finish_request)

        self.zmq_handler.msg_from_kernel_callbacks.append(kernel_callback)

        def timeout_callback():
            logger.debug('service request timed out for %s', self.kernel_id)
            loop.add_callback(self.finish_request)

        self.timeout_handle = loop.call_later(30, timeout_callback)
        exec_message = {
            'channel': 'shell',
            'parent_header': {},
            'header': {
                'msg_id': str(uuid.uuid4()),
                'username': '',
                'session': self.kernel_id,
                'msg_type': 'execute_request',
            },
            'content': {
                'code': code,
                'silent': False,
                'user_expressions': jsonapi.loads(
                    self.get_argument('user_expressions', '{}')),
                'allow_stdin': False,
            },
            'metadata': {},
        }
        self.zmq_handler.on_message(jsonapi.dumps(exec_message))
Example #34
    def _run(self, _, frontend, sink, *backend_socks):
        def push_new_job(_job_id, _json_msg, _msg_len):
            # backend_socks[0] is always at the highest priority
            _sock = backend_socks[
                0] if _msg_len <= self.args.priority_batch_size else rand_backend_socket
            _sock.send_multipart([_job_id, _json_msg])

        # bind all sockets
        self.logger.info('bind all sockets')
        frontend.bind('tcp://*:%d' % self.port)
        addr_front2sink = auto_bind(sink)
        addr_backend_list = [auto_bind(b) for b in backend_socks]
        self.logger.info('open %d ventilator-worker sockets, %s' %
                         (len(addr_backend_list), ','.join(addr_backend_list)))

        # start the sink process
        # The sink receives the output of the upstream BertWorker processes
        # and forwards it to the client.
        self.logger.info('start the sink')
        proc_sink = BertSink(self.args, addr_front2sink)
        self.processes.append(proc_sink)
        proc_sink.start()
        addr_sink = sink.recv().decode('ascii')

        # start the backend processes
        # Start several worker processes here, each loading the main model.
        device_map = self._get_device_map()
        for idx, device_id in enumerate(device_map):
            if self.args.mode == 'BERT':
                process = BertWorker(idx, self.args, addr_backend_list,
                                     addr_sink, device_id, self.graph_path,
                                     self.args.mode)
                self.processes.append(process)
                process.start()
            elif self.args.mode in ['NER', 'CLASS']:
                process = BertWorker(idx, self.args, addr_backend_list,
                                     addr_sink, device_id, self.graph_path,
                                     self.args.mode, self.id2label)
                self.processes.append(process)
                process.start()

        # start the http-service process
        if self.args.http_port:
            self.logger.info('start http proxy')
            proc_proxy = BertHTTPProxy(self.args)
            self.processes.append(proc_proxy)
            proc_proxy.start()

        rand_backend_socket = None
        server_status = ServerStatistic()
        while True:
            try:
                request = frontend.recv_multipart()
                client, msg, req_id, msg_len = request
            except ValueError:
                self.logger.error(
                    'received a wrongly-formatted request (expected 4 frames, got %d)'
                    % len(request))
                self.logger.error('\n'.join('field %d: %s' % (idx, k)
                                            for idx, k in enumerate(request)),
                                  exc_info=True)
            else:
                server_status.update(request)
                if msg == ServerCommand.terminate:
                    break
                elif msg == ServerCommand.show_config:
                    self.logger.info(
                        'new config request\treq id: %d\tclient: %s' %
                        (int(req_id), client))
                    status_runtime = {
                        'client': client.decode('ascii'),
                        'num_process': len(self.processes),
                        'ventilator -> worker': addr_backend_list,
                        'worker -> sink': addr_sink,
                        'ventilator <-> sink': addr_front2sink,
                        'server_current_time': str(datetime.now()),
                        'statistic': server_status.value,
                        'device_map': device_map,
                        'num_concurrent_socket': self.num_concurrent_socket
                    }

                    sink.send_multipart([
                        client, msg,
                        jsonapi.dumps({
                            **status_runtime,
                            **self.status_args,
                            **self.status_static
                        }), req_id
                    ])
                else:
                    self.logger.info(
                        'new encode request\treq id: %d\tsize: %d\tclient: %s'
                        % (int(req_id), int(msg_len), client))
                    # register a new job at sink
                    sink.send_multipart(
                        [client, ServerCommand.new_job, msg_len, req_id])

                    # renew the backend socket to prevent large job queueing up
                    # [0] is reserved for high priority job
                    # last used backend shouldn't be selected either as it may be queued up already
                    rand_backend_socket = random.choice([
                        b for b in backend_socks[1:]
                        if b != rand_backend_socket
                    ])

                    # push a new job, note super large job will be pushed to one socket only,
                    # leaving other sockets free
                    job_id = client + b'#' + req_id
                    if int(msg_len) > self.max_batch_size:
                        seqs = jsonapi.loads(msg)
                        job_gen = ((job_id + b'@%d' % i,
                                    seqs[i:(i + self.max_batch_size)]) for i in
                                   range(0, int(msg_len), self.max_batch_size))
                        for partial_job_id, job in job_gen:
                            push_new_job(partial_job_id, jsonapi.dumps(job),
                                         len(job))
                    else:
                        push_new_job(job_id, msg, int(msg_len))

        self.logger.info('terminated!')
Example #35
 def serialize(self, json_obj):
     return jsonapi.dumps(json_obj)
Example #36
 def get(self, profile):
     cm = self.application.cluster_manager
     self.finish(jsonapi.dumps(cm.profile_info(profile)))
Example #37
 def get(self):
     cm = self.application.cluster_manager
     self.finish(jsonapi.dumps(cm.list_profiles()))
Example #38
def make_json(command, **props):
    return json.dumps(make_message(command, **props))
Example #39
    def run(self):
        num_req = 0
        run_on_gpu = False
        device_map = [-1] * self.num_worker
        if not self.args.cpu:
            try:
                import GPUtil
                num_all_gpu = len(GPUtil.getGPUs())
                avail_gpu = GPUtil.getAvailable(order='memory',
                                                limit=min(
                                                    num_all_gpu,
                                                    self.num_worker))
                num_avail_gpu = len(avail_gpu)
                if num_avail_gpu < self.num_worker:
                    self.logger.warn(
                        'only %d out of %d GPU(s) is available/free, but "-num_worker=%d"'
                        % (num_avail_gpu, num_all_gpu, self.num_worker))
                    self.logger.warn(
                        'multiple workers will be allocated to one GPU, '
                        'may not scale well and may raise out-of-memory')
                device_map = (avail_gpu * self.num_worker)[:self.num_worker]
                run_on_gpu = True
            except FileNotFoundError:
                self.logger.warn(
                    'nvidia-smi is missing, often means no gpu on this machine. '
                    'fall back to cpu!')

        self.logger.info(
            'device_map: \n\t\t%s' %
            '\n\t\t'.join('worker %2d -> %s' %
                          (w_id, ('gpu %2d' % g_id) if g_id >= 0 else 'cpu')
                          for w_id, g_id in enumerate(device_map)))
        # start the backend processes
        for idx, device_id in enumerate(device_map):
            process = BertWorker(idx, self.args, self.addr_backend,
                                 self.addr_sink, device_id)
            self.processes.append(process)
            process.start()

        while True:
            try:
                request = self.frontend.recv_multipart()
                client, msg, req_id = request
                if msg == ServerCommand.show_config:
                    self.logger.info(
                        'new config request\treq id: %d\tclient: %s' %
                        (int(req_id), client))
                    self.sink.send_multipart([
                        client, msg,
                        jsonapi.dumps({
                            **{
                                'client': client.decode('ascii'),
                                'num_subprocess': len(self.processes),
                                'ventilator -> worker': self.addr_backend,
                                'worker -> sink': self.addr_sink,
                                'ventilator <-> sink': self.addr_front2sink,
                                'server_current_time': str(datetime.now()),
                                'num_request': num_req,
                                'run_on_gpu': run_on_gpu,
                                'server_version': __version__
                            },
                            **self.args_dict
                        }), req_id
                    ])
                    continue

                self.logger.info('new encode request\treq id: %d\tclient: %s' %
                                 (int(req_id), client))
                num_req += 1
                seqs = jsonapi.loads(msg)
                num_seqs = len(seqs)
                # register a new job at sink
                self.sink.send_multipart(
                    [client, ServerCommand.new_job,
                     b'%d' % num_seqs, req_id])

                job_id = client + b'#' + req_id
                if num_seqs > self.max_batch_size:
                    # partition the large batch into small batches
                    s_idx = 0
                    while s_idx < num_seqs:
                        tmp = seqs[s_idx:(s_idx + self.max_batch_size)]
                        if tmp:
                            partial_job_id = job_id + b'@%d' % s_idx
                            self.backend.send_multipart(
                                [partial_job_id,
                                 jsonapi.dumps(tmp)])
                        s_idx += len(tmp)
                else:
                    self.backend.send_multipart([job_id, msg])
            except zmq.error.ContextTerminated:
                self.logger.error('context is closed!')
            except ValueError:
                self.logger.error(
                    'received a wrongly-formatted request (expected 3 frames, got %d)'
                    % len(request))
                self.logger.error('\n'.join('field %d: %s' % (idx, k)
                                            for idx, k in enumerate(request)))
Example #40
    def encode(self,
               texts,
               blocking=True,
               is_tokenized=False,
               show_tokens=False):
        """ Encode a list of strings to a list of vectors

        `texts` should be a list of strings, each of which represents a sentence.
        If `is_tokenized` is set to True, then `texts` should be list[list[str]],
        outer list represents sentence and inner list represent tokens in the sentence.
        Note that if `blocking` is set to False, then you need to fetch the result manually afterwards.

        .. highlight:: python
        .. code-block:: python

            with BertClient() as bc:
                # encode untokenized sentences
                bc.encode(['First do it',
                          'then do it right',
                          'then do it better'])

                # encode tokenized sentences
                bc.encode([['First', 'do', 'it'],
                           ['then', 'do', 'it', 'right'],
                           ['then', 'do', 'it', 'better']], is_tokenized=True)

        :type is_tokenized: bool
        :type show_tokens: bool
        :type blocking: bool
        :type timeout: bool
        :type texts: list[str] or list[list[str]]
        :param is_tokenized: whether the input texts is already tokenized
        :param show_tokens: whether to include tokenization result from the server. If true, the return of the function will be a tuple
        :param texts: list of sentences to be encoded; larger lists give better efficiency
        :param blocking: wait until the encoded result is returned from the server. If false, will immediately return.
        :param timeout: throw a timeout error when the encoding takes longer than the predefined timeout.
        :return: encoded sentence/token-level embeddings, rows correspond to sentences
        :rtype: numpy.ndarray or list[list[float]]

        """
        if is_tokenized:
            self._check_input_lst_lst_str(texts)
        else:
            self._check_input_lst_str(texts)

        if self.length_limit is None:
            warnings.warn(
                'server does not put a restriction on "max_seq_len", '
                'it will determine "max_seq_len" dynamically according to the sequences in the batch. '
                'you can restrict the sequence length on the client side for better efficiency'
            )
        elif self.length_limit and not self._check_length(
                texts, self.length_limit, is_tokenized):
            warnings.warn(
                'some of your sentences have more tokens than "max_seq_len=%d" set on the server, '
                'as consequence you may get less-accurate or truncated embeddings.\n'
                'here is what you can do:\n'
                '- disable the length-check by create a new "BertClient(check_length=False)" '
                'when you do not want to display this warning\n'
                '- or, start a new server with a larger "max_seq_len"' %
                self.length_limit)

        req_id = self._send(jsonapi.dumps(texts), len(texts))
        if not blocking:
            return None
        r = self._recv_ndarray(req_id)
        if self.token_info_available and show_tokens:
            return r.embedding, r.tokens
        elif not self.token_info_available and show_tokens:
            warnings.warn(
                '"show_tokens=True", but the server does not support showing tokenization info to clients.\n'
                'here is what you can do:\n'
                '- start a new server with "bert-serving-start -show_tokens_to_client ..."\n'
                '- or, use "encode(show_tokens=False)"')
        return r.embedding
Example #41
 def result(self):
     # Sort the results
     sort_idx = np.argsort(self.output_ids)
     outputs = np.array(self.outputs)[sort_idx].tolist()
     outputs = [elem for output in outputs for elem in output]
     return jsonapi.dumps(outputs)
Example #42
def send_ndarray(src, dest, X, req_id=b'', flags=0, copy=True, track=False):
    """send a numpy array with metadata"""
    md = dict(dtype=str(X.dtype), shape=X.shape)
    return src.send_multipart([dest, jsonapi.dumps(md), X, req_id], flags, copy=copy, track=track)
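
A hedged receive-side counterpart for send_ndarray(): rebuild the array from the metadata frame written above. The frame order and the jsonapi import are assumptions based on this snippet, not a documented API:

    import numpy as np
    from zmq.utils import jsonapi

    def recv_ndarray(socket):
        dest, md_frame, buf, req_id = socket.recv_multipart()
        md = jsonapi.loads(md_frame)
        X = np.frombuffer(memoryview(buf), dtype=md['dtype']).reshape(md['shape'])
        return dest, X, req_id
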
Example #43
 def on_lock_result(self, topic, headers, message, match):
     _log.debug("Topic: {topic}, {headers}, Message: {message}".format(
             topic=topic, headers=headers, message=message))
     self.publish(topics.ACTUATOR_LOCK_RESULT() + match.group(0),
                  headers, jsonapi.dumps('SUCCESS'))
Example #44
    def _run(self, _, frontend, sink, *backend_socks):

        def push_new_job(_job_id, _json_msg, _msg_len):
            _sock = rand_backend_socket
            _sock.send_multipart([_job_id, _json_msg])

        self.logger.info(f'Bind all sockets. Use ports '
                         f'{self.port}/{self.port_out}')
        frontend.bind(f'tcp://*:{self.port}')
        addr_front2sink = auto_bind(sink)
        addr_backend_list = [auto_bind(b) for b in backend_socks]
        self.logger.info(f'open {len(addr_backend_list)} ventilator-worker '
                         'sockets')

        self.logger.info('Start the sink')
        proc_sink = Sink(self.port_out, addr_front2sink)
        self.processes.append(proc_sink)
        proc_sink.start()
        addr_sink = sink.recv().decode('ascii')

        # start the backend processes
        device_map = [-1] * self.n_workers
        for idx, device_id in enumerate(device_map):
            process = self.Worker(idx, addr_backend_list, addr_sink)
            self.processes.append(process)
            process.start()

        rand_backend_socket = None
        server_status = ServerStatistic()

        for p in self.processes:
            p.is_ready.wait()

        self.is_ready.set()
        self.logger.info('all set, ready to serve request!')

        while True:
            try:
                request = frontend.recv_multipart()
                client, msg, req_id, msg_len = request
            except ValueError:
                self.logger.error(
                    'received a wrongly-formatted request (expected 4 frames, got %d)' % len(request))
                self.logger.error('\n'.join('field %d: %s' % (idx, k)
                                            for idx, k in enumerate(request)), exc_info=True)
            else:
                server_status.update(request)
                if msg == ServerCmd.terminate:
                    break
                elif msg == ServerCmd.show_config:
                    self.logger.info(
                        'new config request\treq id: %d\tclient: %s' % (int(req_id), client))
                    status_runtime = {'client': client.decode('ascii'),
                                      'num_process': len(self.processes),
                                      'ventilator -> worker': addr_backend_list,
                                      'worker -> sink': addr_sink,
                                      'ventilator <-> sink': addr_front2sink,
                                      'server_current_time': str(datetime.now()),
                                      'statistic': server_status.value,
                                      'device_map': device_map,
                                      'n_concurrent_sockets': self.n_concurrent_sockets}

                    sink.send_multipart([client, msg, jsonapi.dumps({**status_runtime,
                                                                     **self.status_static}), req_id])
                else:
                    self.logger.info('new encode request\treq id: %d\tsize: %d\tclient: %s' %
                                     (int(req_id), int(msg_len), client))
                    # register a new job at sink
                    sink.send_multipart(
                        [client, ServerCmd.new_job, msg_len, req_id])

                    # renew the backend socket to prevent large job queueing up
                    # [0] is reserved for high priority job
                    # last used backend shouldn't be selected either as it may be queued up already
                    rand_backend_socket = random.choice(
                        [b for b in backend_socks[1:] if b != rand_backend_socket])

                    # push a new job; note that a very large job is pushed to one socket only,
                    # leaving the other sockets free
                    job_id = client + b'#' + req_id
                    if int(msg_len) > self.max_batch_size:
                        seqs = jsonapi.loads(msg)
                        job_gen = []
                        for i in range(0, int(msg_len), self.max_batch_size):
                            pid = job_id + b'@%d' % i
                            pjob = seqs[i:(i + self.max_batch_size)]
                            job_gen.append((pid, pjob))

                        for partial_job_id, job in job_gen:
                            push_new_job(partial_job_id,
                                         jsonapi.dumps(job), len(job))
                    else:
                        push_new_job(job_id, msg, int(msg_len))

        for p in self.processes:
            p.close()
        self.logger.info('terminated!')
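A minimal sketch of how the b'client#req@<offset>' partial-job convention used above could be regrouped on the receiving end; this is an assumption for illustration, not the project's actual Sink implementation:

from collections import defaultdict

pending = defaultdict(dict)   # job_id -> {offset: partial_result}

def on_partial(frame, partial_result):
    # frames look like b'client#req' or b'client#req@16' when a batch was split
    if b'@' in frame:
        job_id, offset = frame.split(b'@', 1)
        offset = int(offset)
    else:
        job_id, offset = frame, 0
    pending[job_id][offset] = partial_result

def finish(job_id):
    # concatenate the partial results back in offset order
    parts = pending.pop(job_id)
    return [x for _, chunk in sorted(parts.items()) for x in chunk]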
Exemple #45
0
    def run(self):
        self.context = zmq.Context()
        self.frontend = self.context.socket(zmq.ROUTER)
        self.frontend.bind('tcp://*:%d' % self.port)
        # self.frontend.setsockopt(zmq.ROUTER_MANDATORY, 1)

        self.backend = self.context.socket(zmq.PUSH)
        self.backend.bind('ipc://*')
        backend_addr = self.backend.getsockopt(
            zmq.LAST_ENDPOINT).decode('ascii')

        # start the sink thread
        sink_thread = BertSink(self.args, self.frontend, self.client_checksum)
        sink_thread.start()
        self.processes.append(sink_thread)

        available_gpus = range(self.num_worker)
        try:
            import GPUtil
            available_gpus = GPUtil.getAvailable(limit=self.num_worker)
            if len(available_gpus) < self.num_worker:
                self.logger.warn(
                    'only %d GPU(s) is available, but ask for %d' %
                    (len(available_gpus), self.num_worker))
        except FileNotFoundError:
            self.logger.warn(
                'nvidia-smi is missing, often means no gpu found on this machine. '
                'will run service on cpu instead')

        # start the backend processes
        for i in available_gpus:
            process = BertWorker(i, self.args, backend_addr,
                                 sink_thread.address)
            self.processes.append(process)
            process.start()

        while not self.exit_flag.is_set():
            client, _, msg = self.frontend.recv_multipart()
            if msg == b'SHOW_CONFIG':
                self.frontend.send_multipart([
                    client, b'',
                    jsonapi.dumps({
                        **{
                            'client': client.decode('ascii'),
                            'num_process': len(self.processes),
                            'ipc_backend': backend_addr,
                            'ipc_sink': sink_thread.address
                        },
                        **self.args_dict
                    })
                ])
                continue

            seqs = pickle.loads(msg)
            num_seqs = len(seqs)
            self.client_checksum[client] = num_seqs

            if num_seqs > self.max_batch_size:
                # divide the large batch into small batches
                s_idx = 0
                while s_idx < num_seqs:
                    tmp = seqs[s_idx:(s_idx + self.max_batch_size)]
                    if tmp:
                        # get the worker with minimum workload
                        client_partial_id = client + b'@%d' % s_idx
                        self.backend.send_multipart([
                            client_partial_id, b'',
                            pickle.dumps(tmp, protocol=-1)
                        ])
                    s_idx += len(tmp)
            else:
                self.backend.send_multipart([client, b'', msg])

        self.frontend.close()
        self.backend.close()
        self.context.term()
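A hedged client-side sketch for the b'SHOW_CONFIG' branch above; the REQ socket and the endpoint/port are assumptions based on the ROUTER framing (identity, empty delimiter, payload) that the loop expects:

import zmq
from zmq.utils import jsonapi

ctx = zmq.Context()
req = ctx.socket(zmq.REQ)
req.connect('tcp://localhost:5555')   # placeholder port
req.send(b'SHOW_CONFIG')
config = jsonapi.loads(req.recv())
print(config)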
Exemple #46
0
def install_agent(opts, package, config):
    """
    The main installation method for installing the agent on the correct local
    platform instance.

    :param opts:
    :param package:
    :param config:
    :return:
    """
    if config is None:
        config = {}

    # if not a dict then config should be a filename
    if not isinstance(config, dict):
        config_file = config
    else:
        cfg = tempfile.NamedTemporaryFile()
        with open(cfg.name, 'w') as fout:
            fout.write(jsonapi.dumps(config))
        config_file = cfg.name

    try:
        with open(config_file) as fp:
            data = json.load(fp)
    except:
        log.error("Invalid json config file.")
        sys.exit(-10)

    # Configure the whl file before installing.
    add_files_to_package(opts.package, {'config_file': config_file})
    env = _build_copy_env(opts)
    if opts.vip_identity:
        cmds = [opts.volttron_control, "upgrade", opts.vip_identity, package]
    else:
        cmds = [opts.volttron_control, "install", package]

    if opts.tag:
        cmds.extend(["--tag", opts.tag])

    process = Popen(cmds,
                    env=env,
                    stderr=subprocess.PIPE,
                    stdout=subprocess.PIPE)
    (output, errorout) = process.communicate()

    parsed = output.split("\n")

    # If there is not an agent with that identity:
    # 'Could not find agent with VIP IDENTITY "BOO". Installing as new agent
    # Installed /home/volttron/.volttron/packaged/listeneragent-3.2-py2-none-any.whl as 6ccbf8dc-4929-4794-9c8e-3d8c6a121776 listeneragent-3.2'

    # The following is standard output of an agent that was previously installed
    # If the agent was not previously installed then only the second line
    # would have been output to standard out.
    #
    # Removing previous version of agent "foo"
    # Installed /home/volttron/.volttron/packaged/listeneragent-3.2-py2-none-any.whl as 81b811ff-02b5-482e-af01-63d2fd95195a listeneragent-3.2

    if 'Could not' in parsed[0]:
        agent_uuid = parsed[1].split()[-2]
    elif 'Removing' in parsed[0]:
        agent_uuid = parsed[1].split()[-2]
    else:
        agent_uuid = parsed[0].split()[-2]

    output_dict = dict(agent_uuid=agent_uuid)

    if opts.start:
        cmds = [opts.volttron_control, "start", agent_uuid]
        process = Popen(cmds,
                        env=env,
                        stderr=subprocess.PIPE,
                        stdout=subprocess.PIPE)
        (outputdata, errordata) = process.communicate()

        # Expected output on standard out
        # Starting 83856b74-76dc-4bd9-8480-f62bd508aa9c listeneragent-3.2
        if 'Starting' in outputdata:
            output_dict['starting'] = True

    if opts.enable:
        cmds = [opts.volttron_control, "enable", agent_uuid]

        if opts.priority != -1:
            cmds.extend(["--priority", str(opts.priority)])

        process = Popen(cmds,
                        env=env,
                        stderr=subprocess.PIPE,
                        stdout=subprocess.PIPE)
        (outputdata, errordata) = process.communicate()
        # Expected output from standard out
        # Enabling 6bcee29b-7af3-4361-a67f-7d3c9e986419 listeneragent-3.2 with priority 50
        if "Enabling" in outputdata:
            output_dict['enabling'] = True
            output_dict['priority'] = outputdata.split("\n")[0].split()[-1]

    if opts.start:
        # Pause for agent_start_time seconds before verifying that the agent has started.
        sleep(opts.agent_start_time)

        cmds = [opts.volttron_control, "status", agent_uuid]
        process = Popen(cmds,
                        env=env,
                        stderr=subprocess.PIPE,
                        stdout=subprocess.PIPE)
        (outputdata, errordata) = process.communicate()

        # 5 listeneragent-3.2 foo     running [10737]
        output_dict["started"] = "running" in outputdata
        if output_dict["started"]:
            pidpos = outputdata.index('[') + 1
            pidend = outputdata.index(']')
            output_dict['agent_pid'] = int(outputdata[pidpos:pidend])

    if opts.json:
        sys.stdout.write("%s\n" % json.dumps(output_dict, indent=4))
    if opts.csv:
        keylen = len(output_dict.keys())
        keyline = ''
        valueline = ''
        keys = output_dict.keys()
        for k in range(keylen):
            if k < keylen - 1:
                keyline += "%s," % keys[k]
                valueline += "%s," % output_dict[keys[k]]
            else:
                keyline += "%s" % keys[k]
                valueline += "%s" % output_dict[keys[k]]
        sys.stdout.write("%s\n%s\n" % (keyline, valueline))
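A hedged usage sketch for install_agent(); the option names mirror the attributes the function reads above (volttron_control, vip_identity, tag, start, enable, priority, agent_start_time, json, csv), while the values and the wheel path are placeholders:

from argparse import Namespace

opts = Namespace(
    volttron_control='volttron-ctl',
    package='listeneragent-3.2-py2-none-any.whl',   # placeholder wheel
    vip_identity=None,
    tag=None,
    start=True,
    enable=False,
    priority=-1,
    agent_start_time=5,
    json=True,
    csv=False,
)
install_agent(opts, opts.package, config={'agentid': 'listener'})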
Exemple #47
0
 def _send_status_message(self, status):
     msg = self.session.msg("status", {'execution_state': status})
     self.write_message(jsonapi.dumps(msg, default=date_default))
Exemple #48
0
 def get(self):
     km = self.kernel_manager
     self.finish(jsonapi.dumps(km.list_kernel_ids()))
Exemple #49
0
 def publish_json(self, topic, headers, *msg_parts, **kwargs):
     '''Publish JSON encoded message.'''
     msg = [('application/json', jsonapi.dumps(msg)) for msg in msg_parts]
     self._pub.send_message_ex(topic, headers, *msg, **kwargs)
Exemple #50
0
 def handle_autodiscover_message(self, fd_no, type):
     __, address = self.udp_socket.recvfrom(1024)
     self.udp_socket.sendto(json.dumps({'endpoint': self.endpoint}),
                            address)
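A hedged sketch of the discovering peer implied by the handler above: broadcast a probe over UDP and read back the JSON {'endpoint': ...} reply. The port, probe payload and timeout are placeholders:

import json
import socket

def discover_endpoint(port=9999, timeout=2.0):
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    sock.settimeout(timeout)
    sock.sendto(b'discover', ('<broadcast>', port))
    reply, _ = sock.recvfrom(1024)
    return json.loads(reply)['endpoint']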
Exemple #51
0
 def on_recv(self, msg):
     msg = self.kernel.session.feed_identities(msg)[1]
     msg = self.kernel.session.unserialize(msg)
     addr = self.waiting.pop(msg["parent_header"]["msg_id"])
     addr.send("complete," + jsonapi.dumps(msg, default=misc.sage_json))
Exemple #52
0
 def configure_plain(self, domain='*', passwords=None):
     self.pipe.send_multipart([
         b'PLAIN',
         b(domain, self.encoding),
         jsonapi.dumps(passwords or {})
     ])
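A hedged client-side sketch matching the PLAIN mechanism configured above; it assumes an authenticator loaded with the same passwords dict is running on the server, and the credentials and endpoint are placeholders:

import zmq

ctx = zmq.Context.instance()
client = ctx.socket(zmq.DEALER)
client.plain_username = b'admin'    # must match an entry in `passwords`
client.plain_password = b'secret'
client.connect('tcp://localhost:9000')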
Exemple #53
0
 def get(self):
     km = self.application.kernel_manager
     self.finish(jsonapi.dumps(km.kernel_ids))
Exemple #54
0
            if isinstance(key, unicode_type):
                obj[squash_unicode(key)] = obj.pop(key)
    elif isinstance(obj, list):
        for i, v in enumerate(obj):
            obj[i] = squash_unicode(v)
    elif isinstance(obj, unicode_type):
        obj = obj.encode('utf8')
    return obj


#-----------------------------------------------------------------------------
# globals and defaults
#-----------------------------------------------------------------------------

# ISO8601-ify datetime objects
json_packer = lambda obj: jsonapi.dumps(obj, default=date_default)
json_unpacker = lambda s: extract_dates(jsonapi.loads(s))

pickle_packer = lambda o: pickle.dumps(o, -1)
pickle_unpacker = pickle.loads

default_packer = json_packer
default_unpacker = json_unpacker

DELIM = b"<IDS|MSG>"
# singleton dummy tracker, which will always report as done
DONE = zmq.MessageTracker()

#-----------------------------------------------------------------------------
# Mixin tools for apps that use Sessions
#-----------------------------------------------------------------------------
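A short round-trip sketch, assuming the json_packer/json_unpacker definitions above are in scope: date_default serializes datetimes to ISO8601 strings on the way out and extract_dates turns them back into datetime objects on load.

from datetime import datetime, timezone

msg = {'sent': datetime.now(timezone.utc), 'value': 42}
wire = json_packer(msg)           # 'sent' becomes an ISO8601 string
restored = json_unpacker(wire)    # extract_dates rebuilds the datetime
assert restored['value'] == 42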
Exemple #55
0
 def on_recv(self, msg):
     msg = self.session.feed_identities(msg)[1]
     msg = self.session.unserialize(msg)
     msg_id = msg["parent_header"]["msg_id"]
     kc = self.waiting.pop(msg_id)
     kc.send("complete," + jsonapi.dumps(msg, default=sage_json))
Exemple #56
0
def process_main_config(main_file, output_directory, keep=False):
    main_config = parse_json_config(main_file.read())
    driver_list = main_config.pop("driver_config_list")
    driver_count = len(driver_list)

    csv_name_map = {}
    csv_contents = {}

    driver_configs = {}

    for config_path in driver_list:
        new_config_name, device_config = process_driver_config(
            config_path, csv_name_map, csv_contents)

        if new_config_name in driver_configs:
            print "WARNING DUPLICATE DEVICES:", new_config_name, "FOUND IN", config_path

        driver_configs[new_config_name] = device_config

    staggered_start = main_config.pop('staggered_start', None)

    if staggered_start is not None:
        main_config["driver_scrape_interval"] = staggered_start / float(
            driver_count)

    print "New Main config:"
    pprint(main_config)
    print

    if not os.path.exists(output_directory):
        os.makedirs(output_directory)

    os.chdir(output_directory)

    devices_path = "devices"
    registries_path = "registry_configs"

    if not keep:
        if os.path.exists(devices_path):
            shutil.rmtree(devices_path, ignore_errors=True)

        if os.path.exists(registries_path):
            shutil.rmtree(registries_path, ignore_errors=True)

    if not os.path.exists(devices_path):
        os.makedirs(devices_path)

    if not os.path.exists(registries_path):
        os.makedirs(registries_path)

    print "Writing 'config'"
    with open("config", "w") as f:
        f.write(jsonapi.dumps(main_config, indent=2))

    for name, contents in csv_contents.iteritems():
        print "Writing", name
        with open(name, "w") as f:
            f.write(contents)

    unique_paths = set()

    for name, config in driver_configs.iteritems():
        print "Writing", name
        dir_name = os.path.dirname(name)

        if dir_name not in unique_paths and not os.path.exists(dir_name):
            os.makedirs(dir_name)

        unique_paths.add(dir_name)

        with open(name, "w") as f:
            f.write(jsonapi.dumps(config, indent=2))
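A small worked example of the staggered_start conversion above (values are placeholders): spreading a 30-second staggered start across 15 drivers yields a driver_scrape_interval of 2.0 seconds.

staggered_start, driver_count = 30.0, 15
driver_scrape_interval = staggered_start / float(driver_count)
assert driver_scrape_interval == 2.0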
Exemple #57
0
    def post(self):
        if config.get_config("requires_tos") and \
                self.get_argument("accepted_tos", "false") != "true":
            self.write(
                """When evaluating code, you must acknowledge your acceptance
of the terms of service at /static/tos.html by passing the parameter
accepted_tos=true\n""")
            self.set_status(403)
            self.finish()
            return
        default_timeout = 30  # seconds
        code = "".join(self.get_arguments('code', strip=False))
        if len(code) > 65000:
            self.set_status(413)
            self.write("Max code size is 65000 characters")
            self.finish()
            return
        if code:
            km = self.application.km
            remote_ip = self.request.remote_ip
            referer = self.request.headers.get('Referer', '')
            self.kernel_id = yield gen.Task(km.new_session_async,
                                            referer=referer,
                                            remote_ip=remote_ip,
                                            timeout=0)
            if not (remote_ip == "::1" and referer == ""
                    and cron.match(code) is not None):
                sm = StatsMessage(kernel_id=self.kernel_id,
                                  remote_ip=remote_ip,
                                  referer=referer,
                                  code=code,
                                  execute_type='service')
                if remote_ip == "127.0.0.1" and self.kernel_id:
                    stats_logger.debug(sm)
                else:
                    stats_logger.info(sm)

            self.shell_handler = ShellServiceHandler(self.application)
            self.iopub_handler = IOPubServiceHandler(self.application)
            self.iopub_handler.open(self.kernel_id)
            self.shell_handler.open(self.kernel_id)

            loop = ioloop.IOLoop.instance()

            self.success = False

            def done(msg):
                if msg["msg_type"] == "execute_reply":
                    self.success = msg["content"]["status"] == "ok"
                    self.user_variables = msg["content"].get(
                        "user_variables", [])
                    self.execute_reply = msg['content']
                    loop.remove_timeout(self.timeout_request)
                    loop.add_callback(self.finish_request)

            self.shell_handler.msg_from_kernel_callbacks.append(done)
            self.timeout_request = loop.add_timeout(
                time.time() + default_timeout, self.timeout_request)
            exec_message = {
                "parent_header": {},
                "header": {
                    "msg_id": str(uuid.uuid4()),
                    "username": "",
                    "session": self.kernel_id,
                    "msg_type": "execute_request",
                },
                "content": {
                    "code": code,
                    "silent": False,
                    "user_variables": self.get_arguments('user_variables'),
                    "user_expressions": {},
                    "allow_stdin": False,
                },
                "metadata": {}
            }
            self.shell_handler.on_message(jsonapi.dumps(exec_message))
Exemple #58
0
 def test_publish(self):
     stat = {'subtopic': 1, 'foo': 'bar'}
     self.publisher.publish('foobar', stat)
     self.publisher.socket.send_multipart.assert_called_with(
         [b'stat.foobar.1', json.dumps(stat)])
Exemple #59
0
        obj = obj.encode('utf8')
    return obj

#-----------------------------------------------------------------------------
# globals and defaults
#-----------------------------------------------------------------------------

# default values for the thresholds:
MAX_ITEMS = 64
MAX_BYTES = 1024

# ISO8601-ify datetime objects
# allow unicode
# disallow nan, because it's not actually valid JSON
json_packer = lambda obj: jsonapi.dumps(obj, default=date_default,
    ensure_ascii=False, allow_nan=False,
)
json_unpacker = lambda s: jsonapi.loads(s)

pickle_packer = lambda o: pickle.dumps(squash_dates(o), PICKLE_PROTOCOL)
pickle_unpacker = pickle.loads

default_packer = json_packer
default_unpacker = json_unpacker

DELIM = b"<IDS|MSG>"
# singleton dummy tracker, which will always report as done
DONE = zmq.MessageTracker()

#-----------------------------------------------------------------------------
# Mixin tools for apps that use Sessions
#-----------------------------------------------------------------------------
Exemple #60
0
    def run(self):
        available_gpus = range(self.num_worker)
        run_on_gpu = True
        num_req = 0
        try:
            import GPUtil
            available_gpus = GPUtil.getAvailable(limit=self.num_worker)
            if len(available_gpus) < self.num_worker:
                self.logger.warn(
                    'only %d GPU(s) is available, but ask for %d' %
                    (len(available_gpus), self.num_worker))
        except FileNotFoundError:
            self.logger.warn(
                'nvidia-smi is missing, often means no gpu found on this machine. '
                'will fall back to cpu!')
            run_on_gpu = False

        # start the backend processes
        for i in available_gpus:
            process = BertWorker(i, self.args, self.addr_backend,
                                 self.addr_sink)
            self.processes.append(process)
            process.start()

        try:
            while True:
                client, msg, req_id = self.frontend.recv_multipart()
                if msg == ServerCommand.show_config:
                    self.logger.info(
                        'new config request\treq id: %d\tclient: %s' %
                        (int(req_id), client))
                    self.sink.send_multipart([
                        client, msg,
                        jsonapi.dumps({
                            **{
                                'client': client.decode('ascii'),
                                'num_subprocess': len(self.processes),
                                'ventilator -> worker': self.addr_backend,
                                'worker -> sink': self.addr_sink,
                                'ventilator <-> sink': self.addr_front2sink,
                                'server_current_time': str(datetime.now()),
                                'run_on_gpu': run_on_gpu,
                                'num_request': num_req,
                                'server_version': __version__
                            },
                            **self.args_dict
                        }), req_id
                    ])
                    continue

                self.logger.info('new encode request\treq id: %d\tclient: %s' %
                                 (int(req_id), client))
                num_req += 1
                seqs = jsonapi.loads(msg)
                num_seqs = len(seqs)
                # register a new job at sink
                self.sink.send_multipart(
                    [client, ServerCommand.new_job,
                     b'%d' % num_seqs, req_id])

                job_id = client + b'#' + req_id
                if num_seqs > self.max_batch_size:
                    # partition the large batch into small batches
                    s_idx = 0
                    while s_idx < num_seqs:
                        tmp = seqs[s_idx:(s_idx + self.max_batch_size)]
                        if tmp:
                            partial_job_id = job_id + b'@%d' % s_idx
                            self.backend.send_multipart(
                                [partial_job_id,
                                 jsonapi.dumps(tmp)])
                        s_idx += len(tmp)
                else:
                    self.backend.send_multipart([job_id, msg])
        except zmq.error.ContextTerminated:
            self.logger.error('context is closed!')
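A hedged client-side sketch matching the three-frame read (client identity, msg, req_id) in the loop above; the DEALER socket, endpoint and sample texts are assumptions for illustration:

import zmq
from zmq.utils import jsonapi

ctx = zmq.Context()
sock = ctx.socket(zmq.DEALER)
sock.connect('tcp://localhost:5555')   # placeholder endpoint
req_id = b'1'
texts = ['hello world', 'encode me']
sock.send_multipart([jsonapi.dumps(texts), req_id])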