class DeviceExampleServer(katcp.DeviceServer): ## Interface version information. VERSION_INFO = ("example-server", 0, 1) ## Device server build / instance information. BUILD_INFO = ("my-example-server", 0, 1, "rc1") #pylint: disable-msg=R0904 def setup_sensors(self): pass def request_echo(self, sock, msg): """Echo the arguments of the message sent.""" return katcp.Message.reply(msg.name, "ok", *msg.arguments) @request(Str(), Int()) @return_reply(Str()) def request_repeat(self, sock, txt, n): """Repeat txt n times.""" return ("ok", txt * n) @request(Float(), Float()) @return_reply(Float()) def request_add(self, sock, x, y): """Add x and y.""" return ("ok", x + y) @request(Int(), Int()) @return_reply(Int()) def request_intdiv(self, sock, x, y): """Perform integer division of x and y.""" return ("ok", x // y)
class ManagementNode(NodeServer): """Katcp server for cluster head nodes. Notes: This is the basis of the top level interface for FBF/APSUSE. """ VERSION_INFO = ("reynard-managementnode-api", 0, 1) BUILD_INFO = ("reynard-managementnode-implementation", 0, 1, "rc1") def __init__(self, server_host, server_port, config): self._clients = {} super(ManagementNode, self).__init__(server_host, server_port, config) def start(self): """Start the server Based on the passed configuration object this is where the clients for suboridnates nodes will be set up. """ super(ManagementNode, self).start() self.ioloop.add_callback(self._setup_clients) def _setup_clients(self): """Setup clients based on configuration object.""" for node, port in self._config.NODES: name = '{node}-client'.format(node=node) self._add_client(name, '127.0.0.1', port) def _add_client(self, name, ip, port): """Add a named client.""" if name in self._clients.keys(): raise KeyError( "Client already exists with name '{name}'".format(name=name)) client = KATCPClientResource( dict(name=name, address=(ip, port), controlled=True)) client.start() self._clients[name] = client def _remove_client(self, name): """Remove a client by name.""" if name not in self._clients.keys(): raise KeyError( "No client exists with name '{name}'".format(name=name)) self._clients[name].stop() self._clients[name].join() del self._clients[name] @request(Str(), Str(), Int()) @return_reply(Str()) def request_client_add(self, req, name, ip, port): """Add a new client.""" try: self._add_client(name, ip, port) except KeyError, e: return ("fail", str(e)) return ("ok", "added client")
class FakeCamEventServer(DeviceServer): """Device server that serves fake CAM events for a simulated observation. Parameters ---------- attributes : dict mapping string to string Attributes as key-value string pairs which are streamed once upfront sensors : file object, string, list of strings or generator File-like object or filename of CSV file listing sensors to serve, with header row followed by rows with format 'name, description, unit, type' """ VERSION_INFO = ("fake_cam_event", 0, 1) BUILD_INFO = ("fake_cam_event", 0, 1, __version__) def __init__(self, attributes, sensors, *args, **kwargs): self.attributes = attributes self.sensors = np.loadtxt(sensors, delimiter=',', skiprows=1, dtype=np.str) super(FakeCamEventServer, self).__init__(*args, **kwargs) def setup_sensors(self): """Populate sensor objects on server.""" for fields in self.sensors: self.add_sensor(FakeSensor(*[f.strip() for f in fields])) @return_reply(Str()) def request_get_attributes(self, req, msg): """Return dictionary of attributes.""" logger.info('Returning %d attributes' % (len(attributes,))) return ("ok", repr(self.attributes))
class DefaultConfigurationAuthority(BaseFbfConfigurationAuthority): def __init__(self, host, port): super(DefaultConfigurationAuthority, self).__init__(host, port) self.default_config = { u'coherent-beams-nbeams': 100, u'coherent-beams-tscrunch': 16, u'coherent-beams-fscrunch': 1, u'coherent-beams-granularity': 6, u'incoherent-beam-tscrunch': 16, u'incoherent-beam-fscrunch': 1, } @tornado.gen.coroutine def get_target_config(self, product_id, target): # Return just a boresight beam raise Return({"beams": [target]}) @tornado.gen.coroutine def get_sb_config(self, product_id, sb_id): raise Return(self.default_config) @request(Str()) @return_reply() def request_update_default_sb_config(self, req, config_json): """ @brief Update the default config returned on a get_sb_config call. This is intended for testing purposes only. """ self.default_config.update(json.loads(config_json)) return ("ok", )
def setUp(self): basic = Str() default = Str(default="something") optional = Str(optional=True) default_optional = Str(default="something", optional=True) # For packing, the input can be any type except NoneType. self._pack = [ (basic, "adsasdasd", b"adsasdasd"), (basic, b"adsasdasd", b"adsasdasd"), (basic, u"adsasdasd", b"adsasdasd"), (basic, u"skräm", b"skr\xc3\xa4m"), (basic, [1, 2.0, 'three', False], b"[1, 2.0, 'three', False]"), (basic, None, ValueError), (default, None, b"something"), (default_optional, None, b"something"), (optional, None, ValueError), ] # For unpacking, the input is assumed to be a byte string, and # the output should be a native string (bytes on PY2, unicode on PY3). # On PY2, as input is assumed to be bytes already, nothing is done, # nor is the type checked. if future.utils.PY2: bytes_unpacked = "adsasdasd" unicode_unpacked = u"adsasdasd" utf8_unpacked = "skr\xc3\xa4m" list_str_unpacked = "[1, 2.0, 'three', False]" else: bytes_unpacked = "adsasdasd" unicode_unpacked = AttributeError utf8_unpacked = "skräm" list_str_unpacked = "[1, 2.0, 'three', False]" self._unpack = [ (basic, b"adsasdasd", bytes_unpacked), (basic, u"adsasdasd", unicode_unpacked), (basic, b"skr\xc3\xa4m", utf8_unpacked), (basic, b"[1, 2.0, 'three', False]", list_str_unpacked), (basic, None, ValueError), (default, None, "something"), (default_optional, None, "something"), (optional, None, None), ]
class PafBackendController(AsyncDeviceServer): VERSION_INFO = ("paf-backend-controller-api", 0, 1) BUILD_INFO = ("paf-backend-controller-implementation", 0, 1, "rc1") DEVICE_STATUSES = ["ok", "degraded", "fail"] def __init__(self, ip, port): super(PafBackendController, self).__init__(ip, port) def setup_sensors(self): self._device_status = Sensor.discrete( "device-status", description="Health status of PafBackendController", params=self.DEVICE_STATUSES, default="ok", initial_status=Sensor.UNKNOWN) self.add_sensor(self._device_status) def start(self): super(PafBackendController, self).start() @request(Int(), Float(), Bool()) @return_reply(Int(), Float()) def request_myreq(self, req, my_int, my_float, my_bool): '''?myreq my_int my_float my_bool''' return ("ok", my_int + 1, my_float / 2.0) @request(Str()) @return_reply(Str()) def request_echo(self, req, message): """ @brief A request that echos a message """ return ("ok", message) @request(Str()) @return_reply(Str()) def request_echomine(self, req, message): """ @brief A request that echos a message """ return ("ok", message)
def setUp(self): basic = Str() default = Str(default="something") optional = Str(optional=True) default_optional = Str(default="something", optional=True) self._pack = [ (basic, "adsasdasd", "adsasdasd"), (basic, None, ValueError), (default, None, "something"), (default_optional, None, "something"), (optional, None, ValueError), ] self._unpack = [ (basic, "adsasdasd", "adsasdasd"), (basic, None, ValueError), (default, None, "something"), (default_optional, None, "something"), (optional, None, None), ]
def test_pack_types_more_types_than_args(self): expected = [b'one', b'2', b'1', b'four'] self.check_packing( [Str(), Int(), Bool(default=True), Str(default='four')], ['one', 2], expected)
class KATCPServer (DeviceServer): VERSION_INFO = ("ptuse-api", 1, 0) BUILD_INFO = ("ptuse-implementation", 0, 1, "") # Optionally set the KATCP protocol version and features. Defaults to # the latest implemented version of KATCP, with all supported optional # features PROTOCOL_INFO = ProtocolFlags(5, 0, set([ ProtocolFlags.MULTI_CLIENT, ProtocolFlags.MESSAGE_IDS, ])) def __init__ (self, server_host, server_port, script): self.script = script self._host_sensors = {} self._beam_sensors = {} self._data_product = {} self._data_product["id"] = "None" self.data_product_res = [] self.data_product_res.append(re.compile ("^[a-zA-Z]+_1")) self.data_product_res.append(re.compile ("^[a-zA-Z]+_2")) self.data_product_res.append(re.compile ("^[a-zA-Z]+_3")) self.data_product_res.append(re.compile ("^[a-zA-Z]+_4")) self.script.log(2, "KATCPServer::__init__ starting DeviceServer on " + server_host + ":" + str(server_port)) DeviceServer.__init__(self, server_host, server_port) DEVICE_STATUSES = ["ok", "degraded", "fail"] def setup_sensors(self): """Setup server sensors.""" self.script.log(2, "KATCPServer::setup_sensors()") self._device_status = Sensor.discrete("device-status", description="Status of entire system", params=self.DEVICE_STATUSES, default="ok") self.add_sensor(self._device_status) self._beam_name = Sensor.string("beam-name", description="name of configured beam", unit="", default="") self.add_sensor(self._beam_name) # setup host based sensors self._host_name = Sensor.string("host-name", description="hostname of this server", unit="", default="") self.add_sensor(self._host_name) self.script.log(2, "KATCPServer::setup_sensors lmc="+str(self.script.lmc)) (host, port) = self.script.lmc.split(":") self.setup_sensors_host (host, port) self.script.log(2, "KATCPServer::setup_sensors beams="+str(self.script.beam)) self.setup_sensors_beam (self.script.beam_name) # add sensors based on the reply from the specified host def setup_sensors_host (self, host, port): self.script.log(2, "KATCPServer::setup_sensors_host ("+host+","+port+")") sock = sockets.openSocket (DL, host, int(port), 1) if sock: self.script.log(2, "KATCPServer::setup_sensors_host sock.send(" + self.script.lmc_cmd + ")") sock.send (self.script.lmc_cmd + "\r\n") lmc_reply = sock.recv (65536) sock.close() xml = xmltodict.parse(lmc_reply) self._host_sensors = {} # Disk sensors self.script.log(2, "KATCPServer::setup_sensors_host configuring disk sensors") disk_prefix = host+".disk" self._host_sensors["disk_size"] = Sensor.float(disk_prefix+".size", description=host+": disk size", unit="MB", params=[8192,1e9], default=0) self._host_sensors["disk_available"] = Sensor.float(disk_prefix+".available", description=host+": disk available space", unit="MB", params=[1024,1e9], default=0) self.add_sensor(self._host_sensors["disk_size"]) self.add_sensor(self._host_sensors["disk_available"]) # Server Load sensors self.script.log(2, "KATCPServer::setup_sensors_host configuring load sensors") self._host_sensors["num_cores"] = Sensor.integer (host+".num_cores", description=host+": disk available space", unit="MB", params=[1,64], default=0) self._host_sensors["load1"] = Sensor.float(host+".load.1min", description=host+": 1 minute load ", unit="", default=0) self._host_sensors["load5"] = Sensor.float(host+".load.5min", description=host+": 5 minute load ", unit="", default=0) self._host_sensors["load15"] = Sensor.float(host+".load.15min", description=host+": 15 minute load ", unit="", default=0) self.add_sensor(self._host_sensors["num_cores"]) self.add_sensor(self._host_sensors["load1"]) self.add_sensor(self._host_sensors["load5"]) self.add_sensor(self._host_sensors["load15"]) cpu_temp_pattern = re.compile("cpu[0-9]+_temp") fan_speed_pattern = re.compile("fan[0-9,a-z]+") power_supply_pattern = re.compile("ps[0-9]+_status") self.script.log(2, "KATCPServer::setup_sensors_host configuring other metrics") if not xml["lmc_reply"]["sensors"] == None: for sensor in xml["lmc_reply"]["sensors"]["metric"]: name = sensor["@name"] if name == "system_temp": self._host_sensors[name] = Sensor.float((host+".system_temp"), description=host+": system temperature", unit="C", params=[-20,150], default=0) self.add_sensor(self._host_sensors[name]) if cpu_temp_pattern.match(name): (cpu, junk) = name.split("_") self._host_sensors[name] = Sensor.float((host+"." + name), description=host+": "+ cpu +" temperature", unit="C", params=[-20,150], default=0) self.add_sensor(self._host_sensors[name]) if fan_speed_pattern.match(name): self._host_sensors[name] = Sensor.float((host+"." + name), description=host+": "+name+" speed", unit="RPM", params=[0,20000], default=0) self.add_sensor(self._host_sensors[name]) if power_supply_pattern.match(name): self._host_sensors[name] = Sensor.boolean((host+"." + name), description=host+": "+name, unit="", default=0) self.add_sensor(self._host_sensors[name]) # TODO consider adding power supply sensors: e.g. # device-status-kronos1-powersupply1 # device-status-kronos1-powersupply2 # device-status-kronos2-powersupply1 # device-status-kronos2-powersupply2 # TODO consider adding raid/disk sensors: e.g. # device-status-<host>-raid # device-status-<host>-raid-disk1 # device-status-<host>-raid-disk2 self.script.log(2, "KATCPServer::setup_sensors_host done!") else: self.script.log(2, "KATCPServer::setup_sensors_host no sensors found") else: self.script.log(-2, "KATCPServer::setup_sensors_host: could not connect to LMC") # setup sensors for each beam def setup_sensors_beam (self, beam): b = str(beam) self._beam_sensors = {} self.script.log(2, "KATCPServer::setup_sensors_beam ="+b) self._beam_sensors["observing"] = Sensor.boolean("observing", description="Beam " + b + " is observing", unit="", default=0) self.add_sensor(self._beam_sensors["observing"]) self._beam_sensors["snr"] = Sensor.float("snr", description="SNR of Beam "+b, unit="", params=[0,1e9], default=0) self.add_sensor(self._beam_sensors["snr"]) self._beam_sensors["power"] = Sensor.float("power", description="Power Level of Beam "+b, unit="", default=0) self.add_sensor(self._beam_sensors["power"]) self._beam_sensors["integrated"] = Sensor.float("integrated", description="Length of integration for Beam "+b, unit="", default=0) self.add_sensor(self._beam_sensors["integrated"]) @request() @return_reply(Str()) def request_beam(self, req): """Return the configure beam name.""" return ("ok", self._beam_name.value()) @request() @return_reply(Str()) def request_host_name(self, req): """Return the name of this server.""" return ("ok", self._host_name.value()) @request() @return_reply(Float()) def request_snr(self, req): """Return the SNR for this beam.""" return ("ok", self._beam_sensors["snr"].value()) @request() @return_reply(Float()) def request_power(self, req): """Return the standard deviation of the 8-bit power level.""" return ("ok", self._beam_sensors["power"].value()) @request(Str(), Float()) @return_reply(Str()) def request_sync_time (self, req, data_product_id, adc_sync_time): """Set the ADC_SYNC_TIME for beam of the specified data product.""" if not data_product_id == self._data_product["id"]: return ("fail", "data product " + str (data_product_id) + " was not configured") self.script.beam_config["lock"].acquire() self.script.beam_config["ADC_SYNC_TIME"] = str(adc_sync_time) self.script.beam_config["lock"].release() return ("ok", "") @request(Str(), Str()) @return_reply(Str()) def request_target_start (self, req, data_product_id, target_name): """Commence data processing on specific data product and beam using target.""" self.script.log (1, "request_target_start(" + data_product_id + "," + target_name+")") self.script.beam_config["lock"].acquire() self.script.beam_config["ADC_SYNC_TIME"] = self.script.cam_config["ADC_SYNC_TIME"] self.script.beam_config["OBSERVER"] = self.script.cam_config["OBSERVER"] self.script.beam_config["ANTENNAE"] = self.script.cam_config["ANTENNAE"] self.script.beam_config["SCHEDULE_BLOCK_ID"] = self.script.cam_config["SCHEDULE_BLOCK_ID"] self.script.beam_config["EXPERIMENT_ID"] = self.script.cam_config["EXPERIMENT_ID"] self.script.beam_config["DESCRIPTION"] = self.script.cam_config["DESCRIPTION"] self.script.beam_config["lock"].release() # check the pulsar specified is listed in the catalog (result, message) = self.test_pulsar_valid (target_name) if result != "ok": return (result, message) # check the ADC_SYNC_TIME is valid for this beam if self.script.beam_config["ADC_SYNC_TIME"] == "0": return ("fail", "ADC Synchronisation Time was not valid") # set the pulsar name, this should include a check if the pulsar is in the catalog self.script.beam_config["lock"].acquire() if self.script.beam_config["MODE"] == "CAL": target_name = target_name + "_R" self.script.beam_config["SOURCE"] = target_name self.script.beam_config["lock"].release() host = self.script.tcs_host port = self.script.tcs_port self.script.log (2, "request_target_start: opening socket for beam " + beam_id + " to " + host + ":" + str(port)) sock = sockets.openSocket (DL, host, int(port), 1) if sock: xml = self.script.get_xml_config() sock.send(xml + "\r\n") reply = sock.recv (65536) xml = self.script.get_xml_start_cmd() sock.send(xml + "\r\n") reply = sock.recv (65536) sock.close() return ("ok", "") else: return ("fail", "could not connect to TCS") @request(Str()) @return_reply(Str()) def request_target_stop (self, req, data_product_id): """Cease data processing with target_name.""" self.script.log (1, "request_target_stop(" + data_product_id+")") self.script.beam_config["lock"].acquire() self.script.beam_config["SOURCE"] = "" self.script.beam_config["lock"].release() host = self.script.tcs_host port = self.script.tcs_port sock = sockets.openSocket (DL, host, int(port), 1) if sock: xml = self.script.get_xml_stop_cmd () sock.send(xml + "\r\n") reply = sock.recv (65536) sock.close() return ("ok", "") else: return ("fail", "could not connect to tcs[beam]") @request(Str()) @return_reply(Str()) def request_capture_init (self, req, data_product_id): """Prepare the ingest process for data capture.""" self.script.log (1, "request_capture_init: " + str(data_product_id) ) if not data_product_id == self._data_product["id"]: return ("fail", "data product " + str (data_product_id) + " was not configured") return ("ok", "") @request(Str()) @return_reply(Str()) def request_capture_done(self, req, data_product_id): """Terminte the ingest process for the specified data_product_id.""" self.script.log (1, "request_capture_done: " + str(data_product_id)) if not data_product_id == self._data_product["id"]: return ("fail", "data product " + str (data_product_id) + " was not configured") return ("ok", "") @return_reply(Str()) def request_configure(self, req, msg): """Prepare and configure for the reception of the data_product_id.""" self.script.log (1, "request_configure: nargs= " + str(len(msg.arguments)) + " msg=" + str(msg)) if len(msg.arguments) == 0: self.script.log (-1, "request_configure: no arguments provided") return ("ok", "configured data products: TBD") # the sub-array identifier data_product_id = msg.arguments[0] if len(msg.arguments) == 1: self.script.log (1, "request_configure: request for configuration of " + str(data_product_id)) if data_product_id == self._data_product["id"]: configuration = str(data_product_id) + " " + \ str(self._data_product['antennas']) + " " + \ str(self._data_product['n_channels']) + " " + \ str(self._data_product['cbf_source']) self.script.log (1, "request_configure: configuration of " + str(data_product_id) + "=" + configuration) return ("ok", configuration) else: self.script.log (-1, "request_configure: no configuration existed for " + str(data_product_id)) return ("fail", "no configuration existed for " + str(data_product_id)) if len(msg.arguments) == 4: # if the configuration for the specified data product matches extactly the # previous specification for that data product, then no action is required self.script.log (1, "configure: configuring " + str(data_product_id)) if data_product_id == self._data_product["id"] and \ self._data_product['antennas'] == msg.arguments[1] and \ self._data_product['n_channels'] == msg.arguments[2] and \ self._data_product['cbf_source'] == msg.arguments[3]: response = "configuration for " + str(data_product_id) + " matched previous" self.script.log (1, "configure: " + response) return ("ok", response) # the data product requires configuration else: self.script.log (1, "configure: new data product " + data_product_id) # determine which sub-array we are matched against the_sub_array = -1 for i in range(4): self.script.log (1, "configure: testing self.data_product_res[" + str(i) +"].match(" + data_product_id +")") if self.data_product_res[i].match (data_product_id): the_sub_array = i + 1 if the_sub_array == -1: self.script.log (1, "configure: could not match subarray from " + data_product_id) return ("fail", "could not data product to sub array") self.script.log (1, "configure: restarting pubsub for subarray " + str(the_sub_array)) self.script.pubsub.set_sub_array (the_sub_array, self.script.beam_name) self.script.pubsub.restart() antennas = msg.arguments[1] n_channels = msg.arguments[2] cbf_source = msg.arguments[3] # check if the number of existing + new beams > available (cfreq, bwd, nchan) = self.script.cfg["SUBBAND_CONFIG_0"].split(":") if nchan != n_channels: self._data_product.pop(data_product_id, None) response = "PTUSE configured for " + nchan + " channels" self.script.log (-1, "configure: " + response) return ("fail", response) self._data_product['id'] = data_product_id self._data_product['antennas'] = antennas self._data_product['n_channels'] = n_channels self._data_product['cbf_source'] = cbf_source # parse the CBF_SOURCE to determine multicast groups (addr, port) = cbf_source.split(":") (mcast, count) = addr.split("+") self.script.log (2, "configure: parsed " + mcast + "+" + count + ":" + port) if not count == "1": response = "CBF source did not match ip_address+1:port" self.script.log (-1, "configure: " + response) return ("fail", response) mcasts = ["",""] ports = [0, 0] quartets = mcast.split(".") mcasts[0] = ".".join(quartets) quartets[3] = str(int(quartets[3])+1) mcasts[1] = ".".join(quartets) ports[0] = int(port) ports[1] = int(port) self.script.log (1, "configure: connecting to RECV instance to update configuration") for istream in range(int(self.script.cfg["NUM_STREAM"])): (host, beam_idx, subband) = self.script.cfg["STREAM_" + str(istream)].split(":") beam = self.script.cfg["BEAM_" + beam_idx] if beam == self.script.beam_name: # reset ADC_SYNC_TIME on the beam self.script.beam_config["lock"].acquire() self.script.beam_config["ADC_SYNC_TIME"] = "0"; self.script.beam_config["lock"].release() port = int(self.script.cfg["STREAM_RECV_PORT"]) + istream self.script.log (3, "configure: connecting to " + host + ":" + str(port)) sock = sockets.openSocket (DL, host, port, 1) if sock: req = "<?req version='1.0' encoding='ISO-8859-1'?>" req += "<recv_cmd>" req += "<command>configure</command>" req += "<params>" req += "<param key='DATA_MCAST_0'>" + mcasts[0] + "</param>" req += "<param key='DATA_MCAST_1'>" + mcasts[1] + "</param>" req += "<param key='DATA_PORT_0'>" + str(ports[0]) + "</param>" req += "<param key='DATA_PORT_1'>" + str(ports[1]) + "</param>" req += "<param key='META_MCAST_0'>" + mcasts[0] + "</param>" req += "<param key='META_MCAST_1'>" + mcasts[1] + "</param>" req += "<param key='META_PORT_0'>" + str(ports[0]) + "</param>" req += "<param key='META_PORT_1'>" + str(ports[1]) + "</param>" req += "</params>" req += "</recv_cmd>" self.script.log (1, "configure: sending XML req") sock.send(req) recv_reply = sock.recv (65536) self.script.log (1, "configure: received " + recv_reply) sock.close() return ("ok", "data product " + str (data_product_id) + " configured") else: response = "expected 0, 1 or 4 arguments" self.script.log (-1, "configure: " + response) return ("fail", response) @return_reply(Str()) def request_deconfigure(self, req, msg): """Deconfigure for the data_product.""" if len(msg.arguments) == 0: self.script.log (-1, "request_configure: no arguments provided") return ("fail", "expected 1 argument") # the sub-array identifier data_product_id = msg.arguments[0] self.script.log (1, "configure: deconfiguring " + str(data_product_id)) # check if the data product was previously configured if not data_product_id == self._data_product["id"]: response = str(data_product_id) + " did not match configured data product [" + self._data_product["id"] + "]" self.script.log (-1, "configure: " + response) return ("fail", response) for istream in range(int(self.script.cfg["NUM_STREAM"])): (host, beam_idx, subband) = self.script.cfg["STREAM_" + str(istream)].split(":") if self.script.beam_name == self.script.cfg["BEAM_" + beam_idx]: # reset ADC_SYNC_TIME on the beam self.script.beam_config["lock"].acquire() self.script.beam_config["ADC_SYNC_TIME"] = "0"; self.script.beam_config["lock"].release() port = int(self.script.cfg["STREAM_RECV_PORT"]) + istream self.script.log (3, "configure: connecting to " + host + ":" + str(port)) sock = sockets.openSocket (DL, host, port, 1) if sock: req = "<?req version='1.0' encoding='ISO-8859-1'?>" req += "<recv_cmd>" req += "<command>deconfigure</command>" req += "</recv_cmd>" sock.send(req) recv_reply = sock.recv (65536) sock.close() # remove the data product self._data_product["id"] = "None" response = "data product " + str(data_product_id) + " deconfigured" self.script.log (1, "configure: " + response) return ("ok", response) @request(Int()) @return_reply(Str()) def request_output_channels (self, req, nchannels): """Set the number of output channels.""" if not self.test_power_of_two (nchannels): return ("fail", "number of channels not a power of two") if nchannels < 64 or nchannels > 4096: return ("fail", "number of channels not within range 64 - 2048") self.script.beam_config["OUTNCHAN"] = str(nchannels) return ("ok", "") @request(Int()) @return_reply(Str()) def request_output_bins(self, req, nbin): """Set the number of output phase bins.""" if not self.test_power_of_two(nbin): return ("fail", "nbin not a power of two") if nbin < 64 or nbin > 2048: return ("fail", "nbin not within range 64 - 2048") self.script.beam_config["OUTNBIN"] = str(nbin) return ("ok", "") @request(Int()) @return_reply(Str()) def request_output_tsubint (self, req, tsubint): """Set the length of output sub-integrations.""" if tsubint < 10 or tsubint > 60: return ("fail", "length of output subints must be between 10 and 600 seconds") self.script.beam_config["OUTTSUBINT"] = str(tsubint) return ("ok", "") @request(Float()) @return_reply(Str()) def request_dm(self, req, dm): """Set the value of dispersion measure to be removed""" if dm < 0 or dm > 2000: return ("fail", "dm not within range 0 - 2000") self.script.beam_config["DM"] = str(dm) return ("ok", "") @request(Float()) @return_reply(Str()) def request_cal_freq(self, req, cal_freq): """Set the value of noise diode firing frequecny in Hz.""" if cal_freq < 0 or cal_freq > 1000: return ("fail", "CAL freq not within range 0 - 1000") self.script.beam_config["CALFREQ"] = str(cal_freq) if cal_freq == 0: self.script.beam_config["MODE"] = "PSR" else: self.script.beam_config["MODE"] = "CAL" return ("ok", "") # test if a number is a power of two def test_power_of_two (self, num): return num > 0 and not (num & (num - 1)) # test whether the specified target exists in the pulsar catalog def test_pulsar_valid (self, target): self.script.log (2, "test_pulsar_valid: get_psrcat_param (" + target + ", jname)") (reply, message) = self.get_psrcat_param (target, "jname") if reply != "ok": return (reply, message) self.script.log (2, "test_pulsar_valid: get_psrcat_param () reply=" + reply + " message=" + message) if message == target: return ("ok", "") else: return ("fail", "pulsar " + target + " did not exist in catalog") def get_psrcat_param (self, target, param): cmd = "psrcat -all " + target + " -c " + param + " -nohead -o short" rval, lines = self.script.system (cmd, 3) if rval != 0 or len(lines) <= 0: return ("fail", "could not use psrcat") if lines[0].startswith("WARNING"): return ("fail", "pulsar " + target_name + " did not exist in catalog") parts = lines[0].split() if len(parts) == 2 and parts[0] == "1": return ("ok", parts[1])
class BLBackendInterface(AsyncDeviceServer): """Breakthrough Listen's KATCP Server Backend Interface This server responds to requests sent from CAM, most notably: @ configue @ capture-init @ capture-start @ capture-stop @ capture-done @ deconfigure But because it inherits from AsyncDeviceServer, also responds to: * halt * help * log-level * restart [#restartf1]_ * client-list * sensor-list * sensor-sampling * sensor-value * watchdog * version-list (only standard in KATCP v5 or later) * request-timeout-hint (pre-standard only if protocol flags indicates timeout hints, supported for KATCP v5.1 or later) * sensor-sampling-clear (non-standard) """ VERSION_INFO = ("BLUSE-katcp-interface", 1, 0) BUILD_INFO = ("BLUSE-katcp-implementation", 1, 0, "rc?") DEVICE_STATUSES = ["ok", "fail", "degraded"] def __init__(self, server_host, server_port): self.port = server_port self.redis_server = redis.StrictRedis() super(BLBackendInterface, self).__init__(server_host, server_port) def start(self): """Start the server Based on the passed configuration object this is where the clients for suboridnates nodes will be set up. """ super(BLBackendInterface, self).start() print(R""" ,'''''-._ ; ,. <> `-._ ; \' _,--'" ; ( ; , ` \ ;, , \ ; | | MeerKAT BL Backend Interface: ; |, | |\ KATCP Server ; | | | \ Version: {} |.-\ ,\ |\ : Port: {} |.| `. `-. | || :.| `-. \ ';; .- , \;;| ; , | ,\ ; , ; \ https://github.com/ejmichaud/meerkat-backend-interface ; , /`. , ) __,;, ,' \ ,| _,--''__,| / \ : ,'_,-'' | ,/ | : / / | ; ; | | | __,-| |--..__,--| |---.--....___ ___,-| |----'' / | `._`-. `---- \ \ `''' ''' -- `.`. --' `.`-._ _, ,- __,- `-.`. --' `; """.format("{}.{}".format(self.VERSION_INFO[1], self.VERSION_INFO[2]), self.port)) @request(Str(), Str(), Int(), Str(), Str()) @return_reply() def request_configure(self, req, product_id, antennas_csv, n_channels, streams_json, proxy_name): """Receive metadata for upcoming observation. In order to allow BLUSE to make an estimate of its ability to process a particular data product, this command should be used to configure a BLUSE instance when a new subarray is activated. Args: product_id (str): This is a name for the data product, which is a useful tag to include in the data, but should not be analysed further. For example "array_1_bc856M4k". This value will be unique across all subarrays. However, it is not a globally unique identifier for the lifetime of the telescope. The exact same value may be provided at a later time when the same subarray is activated again. antennas_csv (str): A comma separated list of physical antenna names used in particular sub-array to which the data products belongs. n_channels (int): The integer number of frequency channels provided by the CBF. streams_json (str) is a JSON struct containing config keys and values describing the streams. For example: {'stream_type1': { 'stream_name1': 'stream_address1', 'stream_name2': 'stream_address2', ...}, 'stream_type2': { 'stream_name1': 'stream_address1', 'stream_name2': 'stream_address2', ...}, ...} The steam type keys indicate the source of the data and the type, e.g. cam.http. stream_address will be a URI. For SPEAD streams, the format will be spead://<ip>[+<count>]:<port>, representing SPEAD stream multicast groups. When a single logical stream requires too much bandwidth to accommodate as a single multicast group, the count parameter indicates the number of additional consecutively numbered multicast group ip addresses, and sharing the same UDP port number. stream_name is the name used to identify the stream in CAM. A Python example is shown below, for five streams: One CAM stream, with type cam.http. The camdata stream provides the connection string for katportalclient (for the subarray that this BLUSE instance is being configured on). One F-engine stream, with type: cbf.antenna_channelised_voltage. One X-engine stream, with type: cbf.baseline_correlation_products. Two beam streams, with type: cbf.tied_array_channelised_voltage. The stream names ending in x are horizontally polarised, and those ending in y are vertically polarised. proxy_name (str): The CAM name for the instance of the BLUSE data proxy that is being configured. For example, "BLUSE_3". This can be used to query sensors on the correct proxy. Note that for BLUSE there will only be a single instance of the proxy in a subarray. Returns: None... but replies with "ok" or "fail" and logs either info or error Writes: - subbarry1_abc65555:timestamp" -> "1534657577373.23423" :: Redis String - subarray1_abc65555:antennas" -> [1,2,3,4] :: Redis List - subarray1_abc65555:n_channels" -> "4096" :: Redis String - subarray1_abc65555:proxy_name "-> "BLUSE_whatever" :: Redis String - subarray1_abc65555:streams" -> {....} :: Redis Hash !!!CURRENTLY A STRING!!! - current:obs:id -> "subbary1_abc65555" Publishes: redis-channel: 'alerts' <-- "configure" Examples: > ?configure array_1_bc856M4k a1,a2,a3,a4 128000 {"cam.http":{"camdata":"http://monctl.devnmk.camlab.kat.ac.za/api/client/2"},"stream_type2":{"stream_name1":"stream_address1","stream_name2":"stream_address2"}} BLUSE_3 """ try: antennas_list = antennas_csv.split(",") json_dict = unpack_dict(streams_json) cam_url = json_dict['cam.http']['camdata'] except Exception as e: log.error(e) return ("fail", e) statuses = [] statuses.append( write_pair_redis(self.redis_server, "{}:timestamp".format(product_id), time.time())) statuses.append( write_list_redis(self.redis_server, "{}:antennas".format(product_id), antennas_list)) statuses.append( write_pair_redis(self.redis_server, "{}:n_channels".format(product_id), n_channels)) statuses.append( write_pair_redis(self.redis_server, "{}:proxy_name".format(product_id), proxy_name)) statuses.append( write_pair_redis(self.redis_server, "{}:streams".format(product_id), json.dumps(json_dict))) statuses.append( write_pair_redis(self.redis_server, "{}:cam:url".format(product_id), cam_url)) statuses.append( write_pair_redis(self.redis_server, "current:obs:id", product_id)) msg = "configure:{}".format(product_id) statuses.append( publish_to_redis(self.redis_server, REDIS_CHANNELS.alerts, msg)) if all(statuses): return ("ok", ) else: return ("fail", "Failed to publish to our local redis server") @request(Str()) @return_reply() def request_capture_init(self, req, product_id): """Signals that an observation will start soon Publishes a message to the 'alerts' channel of the form: capture-init:product_id The product_id should match what what was sent in the ?configure request This alert should notify all backend processes (such as beamformer) to get ready for data """ msg = "capture-init:{}".format(product_id) success = publish_to_redis(self.redis_server, REDIS_CHANNELS.alerts, msg) if success: return ("ok", ) else: return ("fail", "Failed to publish to our local redis server") @request(Str()) @return_reply() def request_capture_start(self, req, product_id): """Signals that an observation is starting now Publishes a message to the 'alerts' channel of the form: capture-start:product_id The product_id should match what what was sent in the ?configure request This alert should notify all backend processes (such as beamformer) that they need to be collecting data now """ msg = "capture-start:{}".format(product_id) success = publish_to_redis(self.redis_server, REDIS_CHANNELS.alerts, msg) if success: return ("ok", ) else: return ("fail", "Failed to publish to our local redis server") @request(Str()) @return_reply() def request_capture_stop(self, req, product_id): """Signals that an observation is has stopped Publishes a message to the 'alerts' channel of the form: capture-stop:product_id The product_id should match what what was sent in the ?configure request This alert should notify all backend processes (such as beamformer) that they should stop collecting data now """ msg = "capture-stop:{}".format(product_id) success = publish_to_redis(self.redis_server, REDIS_CHANNELS.alerts, msg) if success: return ("ok", ) else: return ("fail", "Failed to publish to our local redis server") @request(Str()) @return_reply() def request_capture_done(self, req, product_id): """Signals that an observation has finished Publishes a message to the 'alerts' channel of the form: capture-done:product_id The product_id should match what what was sent in the ?configure request This alert should notify all backend processes (such as beamformer) that their data streams are ending """ msg = "capture-done:{}".format(product_id) success = publish_to_redis(self.redis_server, REDIS_CHANNELS.alerts, msg) if success: return ("ok", ) else: return ("fail", "Failed to publish to our local redis server") @request(Str()) @return_reply() def request_deconfigure(self, req, product_id): """Signals that the current data product is done. Deconfigure the BLUSE instance that was created by the call to ?configure with the corresponding product_id. Note: CAM is expected to have sent a ?capture-done request before deconfiguring, in order to ensure that all data has been written. If BLUSE uses an instance of katportalclient to get information from CAM for this BLUSE instance, then it should disconnect at this time. Publishes a message to the 'alerts' channel of the form: deconfigure:product_id The product_id should match what what was sent in the ?configure request This alert should notify all backend processes (such as beamformer) that their data streams are ending """ msg = "deconfigure:{}".format(product_id) success = publish_to_redis(self.redis_server, REDIS_CHANNELS.alerts, msg) if success: return ("ok", ) else: return ("fail", "Failed to publish to our local redis server") def setup_sensors(self): """ @brief Set up monitoring sensors. @note The following sensors are made available on top of defaul sensors implemented in AsynDeviceServer and its base classes. device-status: Reports the health status of the FBFUSE and associated devices: Among other things report HW failure, SW failure and observation failure. """ self._device_status = Sensor.discrete( "device-status", description="Health status of BLUSE", params=self.DEVICE_STATUSES, default="ok", initial_status=Sensor.NOMINAL) self.add_sensor(self._device_status) self._local_time_synced = Sensor.boolean( "local-time-synced", description="Indicates BLUSE is NTP syncronised.", default=True, # TODO: implement actual NTP synchronization request initial_status=Sensor.NOMINAL) self.add_sensor(self._local_time_synced) self._version = Sensor.string( "version", description="Reports the current BLUSE version", default=str(self.VERSION_INFO[1:]).strip('()').replace( ' ', '').replace(",", '.'), # e.g. '1.0' initial_status=Sensor.NOMINAL) self.add_sensor(self._version) def request_halt(self, req, msg): """Halts the server, logs to syslog and slack, and exits the program Returns ------- success : {'ok', 'fail'} Whether scheduling the halt succeeded. Examples -------- :: ?halt !halt ok TODO: - Call halt method on superclass to avoid copy paste Doing this caused an issue: File "/Users/Eric/Berkeley/seti/packages/meerkat/lib/python2.7/site-packages/katcp/server.py", line 1102, in handle_request assert (reply.mtype == Message.REPLY) AttributeError: 'NoneType' object has no attribute 'mtype' """ f = Future() @gen.coroutine def _halt(): req.reply("ok") yield gen.moment self.stop(timeout=None) raise AsyncReply self.ioloop.add_callback(lambda: chain_future(_halt(), f)) log.critical("HALTING SERVER!!!") # TODO: uncomment when you deploy # notify_slack("KATCP server at MeerKAT has halted. Might want to check that!") sys.exit(0) @request() @return_reply(Str()) def request_find_alien(self, req): """Finds an alien. """ return ("ok", R""" . . . . . . . . . + . . . : . .. :. .___---------___. . . . . :.:. _".^ .^ ^. '.. :"-_. . . : . . .:../: . .^ :.:\. . . :: +. :.:/: . . . . . .:\ . : . . _ :::/: . ^ . . .:\ .. . . . - : :.:./. . .:\ . . . :..|: . . ^. .:| . . : : ..|| . . . !:| . . . . ::. ::\( . :)/ . . : . : .:.|. ###### .#######::| :.. . :- : .: ::|.####### ..########:| . . . .. . .. :\ ######## :######## :/ . .+ :: : -.:\ ######## . ########.:/ . .+ . . . . :.:\. ####### #######..:/ :: . . . . ::.:..:.\ . . ..:/ . . . .. : -::::.\. | | . .:/ . : . . .-:.":.::.\ ..:/ . -. . . . .: .:::.:.\. .:/ . . . : : ....::_:..:\ ___. :/ . . . .:. .. . .: :.:.:\ :/ + . . : . ::. :.:. .:.|\ .:/| . + . . ...:: ..| --.:| . . . . . . . ... :..:.."( ..)" . . . : . .: ::/ . .::\ """)
class MasterController(AsyncDeviceServer): """This is the main KATCP interface for the FBFUSE multi-beam beamformer on MeerKAT. This interface satisfies the following ICDs: CAM-FBFUSE: <link> TUSE-FBFUSE: <link> """ VERSION_INFO = ("mpikat-api", 0, 1) BUILD_INFO = ("mpikat-implementation", 0, 1, "rc1") DEVICE_STATUSES = ["ok", "degraded", "fail"] def __init__(self, ip, port, worker_pool): """ @brief Construct new MasterController instance @params ip The IP address on which the server should listen @params port The port that the server should bind to """ super(MasterController, self).__init__(ip, port) self._products = {} self._katportal_wrapper_type = KatportalClientWrapper self._server_pool = worker_pool def start(self): """ @brief Start the MasterController server """ super(MasterController, self).start() def add_sensor(self, sensor): log.debug("Adding sensor: {}".format(sensor.name)) super(MasterController, self).add_sensor(sensor) def remove_sensor(self, sensor): log.debug("Removing sensor: {}".format(sensor.name)) super(MasterController, self).remove_sensor(sensor) def setup_sensors(self): """ @brief Set up monitoring sensors. @note The following sensors are made available on top of default sensors implemented in AsynDeviceServer and its base classes. device-status: Reports the health status of the controller and associated devices: Among other things report HW failure, SW failure and observation failure. local-time-synced: Indicates whether the local time of the servers is synchronised to the master time reference (use NTP). This sensor is aggregated from all nodes that are part of FBF and will return "not sync'd" if any nodes are unsyncronised. products: The list of product_ids that controller is currently handling """ self._device_status = Sensor.discrete( "device-status", description="Health status of FBFUSE", params=self.DEVICE_STATUSES, default="ok", initial_status=Sensor.NOMINAL) self.add_sensor(self._device_status) self._local_time_synced = Sensor.boolean( "local-time-synced", description="Indicates FBF is NTP syncronised.", default=True, initial_status=Sensor.UNKNOWN) self.add_sensor(self._local_time_synced) def ntp_callback(): try: synced = check_ntp_sync() except Exception as error: log.exception("Unable to check NTP sync") self._local_time_synced.set_value(False) else: if not synced: log.warning("Server is not NTP synced") self._local_time_synced.set_value(synced) self._ntp_callback = PeriodicCallback(ntp_callback, NTP_CALLBACK_PERIOD) self._ntp_callback.start() self._products_sensor = Sensor.string( "products", description="The names of the currently configured products", default="", initial_status=Sensor.UNKNOWN) self.add_sensor(self._products_sensor) def _update_products_sensor(self): self._products_sensor.set_value(",".join(self._products.keys())) def _get_product(self, product_id): if product_id not in self._products: raise ProductLookupError( "No product configured with ID: {}".format(product_id)) else: return self._products[product_id] @request(Str(), Int()) @return_reply() def request_register_worker_server(self, req, hostname, port): """ @brief Register an WorkerWrapper instance @params hostname The hostname for the worker server @params port The port number that the worker server serves on @detail Register an WorkerWrapper instance that can be used for FBFUSE computation. FBFUSE has no preference for the order in which control servers are allocated to a subarray. An WorkerWrapper wraps an atomic unit of compute comprised of one CPU, one GPU and one NIC (i.e. one NUMA node on an FBFUSE compute server). """ log.debug("Received request to register worker server at {}:{}".format( hostname, port)) self._server_pool.add(hostname, port) return ("ok", ) @request(Str(), Int()) @return_reply() def request_deregister_worker_server(self, req, hostname, port): """ @brief Deregister an WorkerWrapper instance @params hostname The hostname for the worker server @params port The port number that the worker server serves on @detail The graceful way of removing a server from rotation. If the server is currently actively processing an exception will be raised. """ log.debug( "Received request to deregister worker server at {}:{}".format( hostname, port)) try: self._server_pool.remove(hostname, port) except ServerDeallocationError as error: log.error( "Request to deregister worker server at {}:{} failed with error: {}" .format(hostname, port, str(error))) return ("fail", str(error)) else: return ("ok", ) @request() @return_reply(Int()) def request_worker_server_list(self, req): """ @brief List all control servers and provide minimal metadata """ for server in self._server_pool.used(): req.inform("{} allocated".format(server)) for server in self._server_pool.available(): req.inform("{} free".format(server)) return ("ok", len(self._server_pool.used()) + len(self._server_pool.available())) @request() @return_reply(Int()) def request_product_list(self, req): """ @brief List all currently registered products and their states @param req A katcp request object @note The details of each product are provided via an #inform as a JSON string containing information on the product state. @return katcp reply object [[[ !product-list ok | (fail [error description]) <number of configured products> ]]], """ for product_id, product in self._products.items(): info = {} info[product_id] = product.info() as_json = json.dumps(info) req.inform(as_json) return ("ok", len(self._products)) @request(Str(), Str()) @return_reply() def request_set_default_target_configuration(self, req, product_id, target): """ @brief Set the configuration of FBFUSE from the FBFUSE configuration server @param product_id This is a name for the data product, used to track which subarray is being deconfigured. For example "array_1_bc856M4k". @param target A KATPOINT target string """ try: product = self._get_product(product_id) except ProductLookupError as error: return ("fail", str(error)) try: target = Target(target) except Exception as error: return ("fail", str(error)) if not product.capturing: return ( "fail", "Product must be capturing before a target confiugration can be set." ) product.reset_beams() # TBD: Here we connect to some database and request the default configurations # For example this may return secondary target in the FoV # # As a default the current system will put one beam directly on target and # the rest of the beams in a static tiling pattern around this target now = time.time() nbeams = product._beam_manager.nbeams product.add_tiling(target, nbeams - 1, 1.4e9, 0.5, now) product.add_beam(target) return ("ok", ) @request(Str(), Str()) @return_reply() def request_set_default_sb_configuration(self, req, product_id, sb_id): """ @brief Set the configuration of FBFUSE from the FBFUSE configuration server @param product_id This is a name for the data product, used to track which subarray is being deconfigured. For example "array_1_bc856M4k". @param sb_id The schedule block ID. Decisions of the configuarion of FBFUSE will be made dependent on the configuration of the current subarray, the primary and secondary science projects active and the targets expected to be visted during the execution of the schedule block. """ try: product = self._get_product(product_id) except ProductLookupError as error: return ("fail", str(error)) if product.capturing: return ("fail", "Cannot reconfigure a currently capturing instance.") product.configure_coherent_beams(400, product._katpoint_antennas, 1, 16) product.configure_incoherent_beam(product._katpoint_antennas, 1, 16) now = time.time() nbeams = product._beam_manager.nbeams product.add_tiling(target, nbeams - 1, 1.4e9, 0.5, now) product.add_beam(target) return ("ok", )
def test_unpack_types_more_types_than_args(self): expected = ['one', 2, True, None] self.check_unpacking( [Str(), Int(), Bool(default=True), Str(optional=True)], [b'one', b'2'], expected)
class TestDevice(object): def __init__(self): self.sent_messages = [] @request(Int(min=1, max=10), Discrete(("on", "off")), Bool()) @return_reply(Int(min=1, max=10), Discrete(("on", "off")), Bool()) def request_one(self, sock, i, d, b): if i == 3: return ("fail", "I failed!") if i == 5: return ("bananas", "This should never be sent") if i == 6: return ("ok", i, d, b, "extra parameter") if i == 9: # This actually gets put in the callback params automatically orig_msg = Message.request("one", "foo", "bar") self.finish_request_one(orig_msg, sock, i, d, b) raise AsyncReply() return ("ok", i, d, b) @send_reply(Int(min=1, max=10), Discrete(("on", "off")), Bool()) def finish_request_one(self, msg, sock, i, d, b): return (sock, msg, "ok", i, d, b) def reply(self, sock, msg, orig_msg): self.sent_messages.append([sock, msg]) @request(Int(min=1, max=3, default=2), Discrete(("on", "off"), default="off"), Bool(default=True)) @return_reply(Int(min=1, max=3), Discrete(("on", "off")), Bool()) def request_two(self, sock, i, d, b): return ("ok", i, d, b) @return_reply(Int(min=1, max=3), Discrete(("on", "off")), Bool()) @request(Int(min=1, max=3), Discrete(("on", "off")), Bool()) def request_three(self, sock, i, d, b): return ("ok", i, d, b) @return_reply() @request() def request_four(self, sock): return ["ok"] @inform(Int(min=1, max=3), Discrete(("on", "off")), Bool()) def inform_one(self, sock, i, d, b): pass @request(Int(min=1, max=3), Discrete(("on", "off")), Bool()) @return_reply(Int(min=1, max=3), Discrete(("on", "off")), Bool()) def request_five(self, i, d, b): return ("ok", i, d, b) @return_reply(Int(min=1, max=3), Discrete(("on", "off")), Bool()) @request(Int(min=1, max=3), Discrete(("on", "off")), Bool()) def request_six(self, i, d, b): return ("ok", i, d, b) @return_reply(Int(), Str()) @request(Int(), include_msg=True) def request_seven(self, msg, i): return ("ok", i, msg.name) @return_reply(Int(), Str()) @request(Int(), include_msg=True) def request_eight(self, sock, msg, i): return ("ok", i, msg.name)
class MyServer(AsyncDeviceServer): VERSION_INFO = ("example-api", 1, 0) BUILD_INFO = ("example-implementation", 0, 1, "") # Optionally set the KATCP protocol version and features. Defaults to # the latest implemented version of KATCP, with all supported optional # featuresthat's all of the receivers PROTOCOL_INFO = ProtocolFlags(5, 0, set([ ProtocolFlags.MULTI_CLIENT, ProtocolFlags.MESSAGE_IDS, ])) FRUIT = [ "apple", "banana", "pear", "kiwi", ] def setup_sensors(self): """Setup some server sensors.""" self._add_result = Sensor.float("add.result", "Last ?add result.", "", [-10000, 10000]) self._time_result = Sensor.timestamp("time.result", "Last ?time result.", "") self._eval_result = Sensor.string("eval.result", "Last ?eval result.", "") self._fruit_result = Sensor.discrete("fruit.result", "Last ?pick-fruit result.", "", self.FRUIT) self._device_armed = Sensor.boolean( "device-armed", description="Is the CAM server armed?", initial_status=Sensor.NOMINAL, default=True) self._bandwidth = Sensor.float("bandwidth", default=300) self._sourcename = Sensor.string("sourcename", default="none") self._source_ra = Sensor.string("source_RA", default=0) self._source_dec = Sensor.string("source_DEC", default=0) self._exposure_time = Sensor.float("EXP_time", default=0) self.add_sensor(self._sourcename) self.add_sensor(self._source_ra) self.add_sensor(self._source_dec) self.add_sensor(self._exposure_time) self.add_sensor(self._bandwidth) self.add_sensor(self._device_armed) self.add_sensor(self._add_result) self.add_sensor(self._time_result) self.add_sensor(self._eval_result) self.add_sensor(self._fruit_result) self._systemp_result = Sensor.float("add.result", "Last ?add result.", "", [-10000, 10000]) self.add_sensor(self._systemp_result) ##self._bandwidth = Sensor.float("bandwidth", default=300) #self.add_sensor(self._bandwidth) @request() @return_reply(Str()) def request_bandwidth(self, req, bw): """Return the Bandwidth""" #req.inform("checking armed status", self._device_armed.value()) req.reply("ok", bw) raise AsyncReply @request() @return_reply(Str()) def request_status_armed(self, req): """Return the state of the Armed/Disarmed""" req.inform("checking armed status", self._device_armed.value()) req.reply("ok", self._device_armed.value()) raise AsyncReply @request(Float()) @return_reply() def request_long_action(self, req, t): """submit a long action command for testing using coroutine""" @tornado.gen.coroutine def wait(): yield tornado.gen.sleep(t) req.reply("slept for", t, "second") self.ioloop.add_callback(wait) raise AsyncReply @request(Float(), Float()) @return_reply(Str()) def request_radec(self, req, ra, dec): """testing to read in the RA DEC fomr a client""" # test=ra+dec self.ra = ra self.dec = dec return ("ok", "%f %f" % (self.ra, self.dec)) @request(Float(), Float()) @return_reply(Float()) def request_add(self, req, x, y): """Add two numbers""" r = x + y self._add_result.set_value(r) return ("ok", r) @request() @return_reply(Str()) def request_arm(self, req): """Arm the controller""" @tornado.gen.coroutine def start_controller(): req.inform("processing", "command processing") try: yield tornado.gen.sleep(10) except Exception as error: req.reply("fail", "Unknown error: {0}".format(str(error))) else: req.reply("ok", "effcam armed") self._device_armed.set_value(True) if self._device_armed.value(): return ("fail", "Effcam is already armed") self.ioloop.add_callback(start_controller) raise AsyncReply @request() @return_reply(Str()) def request_disarm(self, req): """disarm the controller""" @tornado.gen.coroutine # @coroutine def stop_controller(): req.inform("processing", "processing command") try: yield tornado.gen.sleep(10) # yield self._controller.stop() except Exception as error: req.reply("fail", "Unknown error: {0}".format(str(error))) else: req.reply("ok", "effcam disarmed") self._device_armed.set_value(False) if self._device_armed.value() == False: return ("fail", "Effcam is already disarmed") self.ioloop.add_callback(stop_controller) raise AsyncReply @request() @return_reply(Str()) def request_status_temp(self, req): """Return the current temp""" #r = time.time() t = "36" # self._time_result.set_value(r) return ("ok", t) @request() @return_reply(Timestamp()) def request_status_time(self, req): """Return the current time in seconds since the Unix Epoch.""" req.inform("processing", "processing command") r = time.time() # self._time_result.set_value(r) req.reply("ok", r) raise AsyncReply # return ("ok", r) @request() @return_reply(Timestamp(), Str()) def request_status_time_and_temp(self, req): """Return the current time in seconds since the Unix Epoch.""" req.inform("processing", "processing command") r = time.time() # self._time_result.set_value(r) t = "36" req.reply("ok", r, t) raise AsyncReply @request(Str()) @return_reply() def request_configure(self, req, config): """Return ok.""" print "{} received configuration {}".format(Time.now(),config) self.config = config time.sleep(1) req.reply("ok",) raise AsyncReply @request(Str()) @return_reply() def request_provision(self, req, config): """Return ok.""" print "{} received provision {}".format(Time.now(),config) self.config = config time.sleep(1) req.reply("ok",) raise AsyncReply @request(Str()) @return_reply() def request_measurement_prepare(self, req, config): """Return ok.""" print "{} received measurement prepare {}".format(Time.now(),config) self.config = config time.sleep(1) req.reply("ok",) raise AsyncReply @request(Str()) @return_reply() def request_configure(self, req, config): """Return ok.""" print "{} received configuration {}".format(Time.now(),config) self.config = config time.sleep(1) req.reply("ok",) raise AsyncReply @request() @return_reply(Str()) def request_status_config(self, req): """Return ok.""" req.reply("ok", "{}".format(self.config)) raise AsyncReply @request() @return_reply() def request_capture_start(self, req): """Return ok.""" print "{} received capture start request on port :{}".format(Time.now(), server_port) req.reply("ok") raise AsyncReply @request() @return_reply() def request_capture_stop(self, req): """Return ok.""" print "{} received capture stop request on port :{}".format(Time.now(), server_port) req.reply("ok") raise AsyncReply @request() @return_reply() def request_measurement_start(self, req): """Return ok.""" print "{} received measurement start request on port :{}".format(Time.now(), server_port) req.reply("ok") raise AsyncReply @request() @return_reply() def request_measurement_stop(self, req): """Return ok.""" print "{} received measurement stop request on port :{}".format(Time.now(), server_port) req.reply("ok") raise AsyncReply @request() @return_reply() def request_deconfigure(self, req): """Return ok.""" print "{} received deconfigure request on port :{}".format(Time.now(), server_port) req.reply("ok") raise AsyncReply @request() @return_reply() def request_deprovision(self, req): """Return ok.""" print "{} received deprovision request on port :{}".format(Time.now(), server_port) req.reply("ok") raise AsyncReply() @return_reply() def request_start(self, req): """Return ok.""" print "{} received start request on port :{}".format(Time.now(), server_port) req.reply("ok") raise AsyncReply @request() @return_reply() def request_stop(self, req): """Return ok.""" print "{} received stop request on port :{}".format(Time.now(), server_port) req.reply("ok") raise AsyncReply
def test_unpack_types_single(self): expected = ['one'] self.check_unpacking([Str()], [b'one'], expected)
class TestDevice(object): def __init__(self): self.sent_messages = [] @request(Int(min=1, max=10), Discrete(("on", "off")), Bool()) @return_reply(Int(min=1, max=10), Discrete(("on", "off")), Bool()) def request_one(self, req, i, d, b): if i == 3: return ("fail", "I failed!") if i == 5: return ("bananas", "This should never be sent") if i == 6: return ("ok", i, d, b, "extra parameter") if i == 9: self.finish_request_one(req, i, d, b) raise AsyncReply() return ("ok", i, d, b) @send_reply(Int(min=1, max=10), Discrete(("on", "off")), Bool()) def finish_request_one(self, req, i, d, b): return (req, "ok", i, d, b) def reply(self, req, msg, orig_msg): self.sent_messages.append([req, msg]) @request(Int(min=1, max=3, default=2), Discrete(("on", "off"), default="off"), Bool(default=True)) @return_reply(Int(min=1, max=3), Discrete(("on", "off")), Bool()) def request_two(self, req, i, d, b): return ("ok", i, d, b) @return_reply(Int(min=1, max=3), Discrete(("on", "off")), Bool()) @request(Int(min=1, max=3), Discrete(("on", "off")), Bool()) def request_three(self, req, i, d, b): return ("ok", i, d, b) @return_reply() @request() def request_four(self, req): return ["ok"] @inform(Int(min=1, max=3), Discrete(("on", "off")), Bool()) def inform_one(self, i, d, b): pass @request(Timestamp(), Timestamp(optional=True), major=4) @return_reply(Timestamp(), Timestamp(default=321), major=4) def request_katcpv4_time(self, req, timestamp1, timestamp2): self.katcpv4_time1 = timestamp1 self.katcpv4_time2 = timestamp2 if timestamp2: return ('ok', timestamp1, timestamp2) else: return ('ok', timestamp1) @request(Timestamp(multiple=True), major=4) @return_reply(Timestamp(multiple=True), major=4) def request_katcpv4_time_multi(self, req, *timestamps): self.katcpv4_time_multi = timestamps return ('ok', ) + timestamps @return_reply(Int(), Str()) @request(Int(), include_msg=True) def request_eight(self, req, msg, i): return ("ok", i, msg.name) @request(Int(), Float(multiple=True)) @return_reply(Int(), Float(multiple=True)) def request_int_multifloat(self, req, i, *floats): return ('ok', i) + floats
def test_unpack_types_many_without_multiple(self): expected = ['one', 2] self.check_unpacking([Str(), Int()], [b'one', b'2'], expected)
class PafWorkerServer(AsyncDeviceServer): """ @brief Interface object which accepts KATCP commands """ VERSION_INFO = ("mpikat-paf-api", 1, 0) BUILD_INFO = ("mpikat-paf-implementation", 0, 1, "rc1") DEVICE_STATUSES = ["ok", "degraded", "fail"] PIPELINE_STATES = [ "idle", "configuring", "ready", "starting", "running", "stopping", "deconfiguring", "error" ] def __init__(self, ip, port): """ @brief Initialization of the PafWorkerServer object @param ip IP address of the server @param port port of the PafWorkerServer """ super(PafWorkerServer, self).__init__(ip, port) self.ip = ip def state_change(self, state, callback): """ @brief callback function for state changes @parma callback object return from the callback function from the pipeline """ log.info('New state of the pipeline is {}'.format(str(state))) self._pipeline_sensor_status.set_value(str(state)) def start(self): super(PafWorkerServer, self).start() def setup_sensors(self): """ @brief Setup monitoring sensors """ self._device_status = Sensor.discrete( "device-status", "Health status of PafWorkerServer", params=self.DEVICE_STATUSES, default="ok", initial_status=Sensor.UNKNOWN) self.add_sensor(self._device_status) self._pipeline_sensor_name = Sensor.string("pipeline-name", "the name of the pipeline", "") self.add_sensor(self._pipeline_sensor_name) self._pipeline_sensor_status = Sensor.discrete( "pipeline-status", description="Status of the pipeline", params=self.PIPELINE_STATES, default="idle", initial_status=Sensor.UNKNOWN) self.add_sensor(self._pipeline_sensor_status) @request(Str()) @return_reply(Str()) def request_configure(self, req, pipeline_name): """ @brief Configure pipeline @param pipeline name of the pipeline """ @coroutine def configure_pipeline(): self._pipeline_sensor_name.set_value(pipeline_name) log.info("Configuring pipeline {}".format( self._pipeline_sensor_name.value())) try: _pipeline_type = PIPELINES[self._pipeline_sensor_name.value()] except KeyError as error: msg = "No pipeline called '{}', available pipeline are: \n{}".format( self._pipeline_sensor_name.value(), "\n".join(PIPELINES.keys())) log.info("{}".format(msg)) req.reply("fail", msg) self._pipeline_sensor_status.set_value("error") self._pipeline_sensor_name.set_value("") raise error self._pipeline_instance = _pipeline_type() self._pipeline_instance.callbacks.add(self.state_change) try: self._pipeline_instance.configure() except Exception as error: msg = "Couldn't start configure pipeline instance {}".format( str(error)) log.info("{}".format(msg)) req.reply("fail", msg) self._pipeline_sensor_status.set_value("error") self._pipeline_sensor_name.set_value("") raise error msg = "pipeline instance configured" log.info("{}".format(msg)) req.reply("ok", msg) if self._pipeline_sensor_status.value() == "idle": self.ioloop.add_callback(configure_pipeline) raise AsyncReply else: msg = "Can't Configure, status = {}".format( self._pipeline_sensor_status.value()) log.info("{}".format(msg)) return ("fail", msg) @request() @return_reply(Str()) def request_start(self, req): """ @brief Start pipeline """ @coroutine def start_pipeline(): try: self._pipeline_instance.start() except Exception as error: msg = "Couldn't start pipeline server {}".format(error) log.info("{}".format(msg)) req.reply("fail", msg) self._pipeline_sensor_status.set_value("error") raise error msg = "Start pipeline {}".format( self._pipeline_sensor_name.value()) log.info("{}".format(msg)) req.reply("ok", msg) if self._pipeline_sensor_status.value() == "ready": self.ioloop.add_callback(start_pipeline) raise AsyncReply else: msg = "pipeline is not in the state of configured, status = {} ".format( self._pipeline_sensor_status.value()) log.info("{}".format(msg)) return ("fail", msg) @request() @return_reply(Str()) def request_stop(self, req): """ @brief Stop pipeline """ @coroutine def stop_pipeline(): self._pipeline_sensor_status.set_value("stopping") try: self._pipeline_instance.stop() except Exception as error: msg = "Couldn't stop pipeline {}".format(error) log.info("{}".format(msg)) req.reply("fail", msg) self._pipeline_sensor_status.set_value("error") raise error msg = "Stop pipeline {}".format(self._pipeline_sensor_name.value()) log.info("{}".format(msg)) req.reply("ok", msg) if self._pipeline_sensor_status.value() == "running": self.ioloop.add_callback(stop_pipeline) raise AsyncReply else: msg = "nothing to stop, status = {}".format( self._pipeline_sensor_status.value()) log.info("{}".format(msg)) return ("fail", msg) @request() @return_reply(Str()) def request_deconfigure(self, req): """ @brief Deconfigure pipeline """ @coroutine def deconfigure(): log.info("deconfiguring pipeline {}".format( self._pipeline_sensor_name.value())) try: self._pipeline_instance.deconfigure() except Exception as error: msg = "Couldn't deconfigure pipeline {}".format(error) log.info("{}".format(msg)) req.reply("fail", msg) self._pipeline_sensor_status.set_value("error") raise error msg = "deconfigured pipeline {}".format( self._pipeline_sensor_name.value()) log.info("{}".format(msg)) req.reply("ok", msg) self._pipeline_sensor_name.set_value("") if self._pipeline_sensor_status.value() == "ready": self.ioloop.add_callback(deconfigure) raise AsyncReply else: msg = "nothing to deconfigure, status = {}".format( self._pipeline_sensor_status.value()) log.info("{}".format(msg)) return ("fail", msg) @request() @return_reply(Str()) def request_avail_pipeline(self, req): """ @brief Get availiable pipelines """ log.info("Requesting list of available pipeline") for key in PIPELINES.keys(): req.inform("{}".format(key)) return ("ok", len(PIPELINES))
class PafWorkerServer(AsyncDeviceServer): """ @brief Interface object which accepts KATCP commands """ VERSION_INFO = ("mpikat-paf-api", 1, 0) BUILD_INFO = ("mpikat-paf-implementation", 0, 1, "rc1") DEVICE_STATUSES = ["ok", "degraded", "fail"] PIPELINE_STATES = [ "idle", "configuring", "ready", "starting", "running", "stopping", "deconfiguring", "error" ] def __init__(self, ip, port): """ @brief Initialization of the PafWorkerServer object @param ip IP address of the server @param port port of the PafWorkerServer """ super(PafWorkerServer, self).__init__(ip, port) self.ip = ip self._managed_sensors = [] def add_pipeline_sensors(self): """ @brief Add pipeline sensors to the managed sensors list """ for sensor in self._pipeline_instance.sensors: self.add_sensor(sensor) self._managed_sensors.append(sensor) self.mass_inform(Message.inform('interface-changed')) def remove_pipeline_sensors(self): """ @brief Remove pipeline sensors from the managed sensors list """ for sensor in self._managed_sensors: self.remove_sensor(sensor) self._managed_sensors = [] self.mass_inform(Message.inform('interface-changed')) def state_change(self, state, callback): """ @brief callback function for state changes @parma callback object return from the callback function from the pipeline """ log.info('New state of the pipeline is {}'.format(str(state))) self._pipeline_sensor_status.set_value(str(state)) @coroutine def start(self): """Start PafWorkerServer server""" super(PafWorkerServer, self).start() @coroutine def stop(self): """Stop PafWorkerServer server""" yield super(PafWorkerServer, self).stop() def setup_sensors(self): """ @brief Setup monitoring sensors """ self._device_status = Sensor.discrete( "device-status", "Health status of PafWorkerServer", params=self.DEVICE_STATUSES, default="ok", initial_status=Sensor.NOMINAL) self.add_sensor(self._device_status) self._pipeline_sensor_name = Sensor.string("pipeline-name", "the name of the pipeline", "") self.add_sensor(self._pipeline_sensor_name) self._pipeline_sensor_status = Sensor.discrete( "pipeline-status", description="Status of the pipeline", params=self.PIPELINE_STATES, default="idle", initial_status=Sensor.NOMINAL) self.add_sensor(self._pipeline_sensor_status) self._ip_address = Sensor.string( "ip", description="the ip address of the node controller", default=os.environ['PAF_NODE_IP'], initial_status=Sensor.NOMINAL) self.add_sensor(self._ip_address) self._mac_address = Sensor.string( "mac", description="the mac address of the node controller", default=os.environ['PAF_NODE_MAC'], initial_status=Sensor.NOMINAL) self.add_sensor(self._mac_address) @request(Str()) @return_reply() def request_configure(self, req, config_json): """ @brief Configure pipeline @param pipeline name of the pipeline """ @coroutine def configure_wrapper(): try: self.configure(config_json) except Exception as error: log.exception(str(error)) req.reply("fail", str(error)) else: req.reply("ok") self.ioloop.add_callback(configure_wrapper) raise AsyncReply @coroutine def configure(self, config_json): try: config_dict = json.loads(config_json) pipeline_name = config_dict["mode"] except KeyError as error: msg = "Error getting the pipeline name from config_json: {}".format( str(error)) log.error(msg) raise PafPipelineKeyError(msg) self._pipeline_sensor_name.set_value(pipeline_name) log.info("Configuring pipeline {}".format( self._pipeline_sensor_name.value())) try: #log.info("Configuring pipeline {}".format() _pipeline_type = PIPELINES[self._pipeline_sensor_name.value()] except KeyError as error: msg = "No pipeline called '{}', available pipeline are: {}".format( self._pipeline_sensor_name.value(), " ".join(PIPELINES.keys())) self._pipeline_sensor_name.set_value("") log.error(msg) raise PafPipelineKeyError(msg) log.info("Configuring pipeline continute") try: log.debug( "Trying to create pipeline instance: {}".format(pipeline_name)) self._pipeline_instance = _pipeline_type() self.add_pipeline_sensors() self._pipeline_instance.callbacks.add(self.state_change) config = json.loads(config_json) log.debug("Unpacked config: {}".format(config)) capture_start_time = Time.now() + 27.0 * units.s frequency = config["frequency"] self._pipeline_instance.configure(capture_start_time, frequency, self._ip_address.value()) except Exception as error: self._pipeline_sensor_name.set_value("") msg = "Couldn't start configure pipeline instance {}".format( str(error)) log.error(msg) raise PafPipelineError(msg) else: log.info("pipeline instance {} configured".format( self._pipeline_sensor_name.value())) @request() @return_reply(Str()) def request_deconfigure(self, req): """ @brief Deconfigure pipeline """ @coroutine def deconfigure_wrapper(): try: yield self.deconfigure() except Exception as error: log.exception(str(error)) req.reply("fail", str(error)) else: req.reply("ok") self.ioloop.add_callback(deconfigure_wrapper) raise AsyncReply @coroutine def deconfigure(self): log.info("Deconfiguring pipeline {}".format( self._pipeline_sensor_name.value())) try: self.remove_pipeline_sensors() self._pipeline_instance.deconfigure() del self._pipeline_instance except Exception as error: msg = "Couldn't deconfigure pipeline {}".format(str(error)) log.error(msg) raise PafPipelineError(msg) else: log.info("Deconfigured pipeline {}".format( self._pipeline_sensor_name.value())) self._pipeline_sensor_name.set_value("") @request(Str()) @return_reply(Str()) def request_start(self, req, config_json): """ @brief Start pipeline """ @coroutine def start_wrapper(): try: yield self.start_pipeline(config_json) except Exception as error: log.exception(str(error)) req.reply("fail", str(error)) else: req.reply("ok") self.ioloop.add_callback(start_wrapper) raise AsyncReply @coroutine def start_pipeline(self, config_json): try: config = json.loads(config_json) utc_start_process = Time.now() + 15 * units.second source_name = config["source-name"] ra = config["ra"] dec = config["dec"] self._pipeline_instance.start(utc_start_process, source_name, ra, dec) except Exception as error: msg = "Couldn't start pipeline server {}".format(str(error)) log.error(msg) raise PafPipelineError(msg) else: log.info("Starting pipeline {}".format( self._pipeline_sensor_name.value())) @request() @return_reply(Str()) def request_stop(self, req): """ @brief Stop pipeline """ @coroutine def stop_wrapper(): try: yield self.stop_pipeline() except Exception as error: log.exception(str(error)) req.reply("fail", str(error)) else: req.reply("ok") self.ioloop.add_callback(stop_wrapper) raise AsyncReply @coroutine def stop_pipeline(self): try: self._pipeline_instance.stop() except Exception as error: msg = "Couldn't stop pipeline {}".format(str(error)) log.error(msg) raise PafPipelineError(msg) else: log.info("Stopping pipeline {}".format( self._pipeline_sensor_name.value())) @request() @return_reply(Str()) def request_avail_pipeline(self, req): """ @brief Get availiable pipelines """ log.info("Requesting list of available pipeline") for key in PIPELINES.keys(): req.inform("{}".format(key)) return ("ok", len(PIPELINES))
class EDDPipeline(AsyncDeviceServer): """ Abstract interface for EDD Pipelines Pipelines can implement functions to act within the following sequence of commands with associated state changes. After provisioning the pipeline is in state idle. ?set "partial config" Updates the current configuration with the provided partial config. After set, it remains in state idle as only the config dictionary may have changed. A wrong config is rejected without changing state as state remains valid. Multiple set commands can bes end to the pipeline. ?configure "partial config" state change from idle to configuring and configured (on success) or error (on fail) ?capture_start state change from configured to streaming or ready (on success) or error (on fail). Streaming indicates that no further changes to the state are expected and data is injected into the EDD. ?measurement_prepare "data" state change from ready to set or error ?measurement_start state change from set to running or error ?measurement_stop state change from running to set or error ?capture_stop return to state configured or idle ?deconfigure restore state idle * configure - optionally does a final update of the current config and prepares the pipeline. Configuring the pipeline may take time, so all lengthy preparations should be done here. * capture start - The pipeline should send data (into the EDD) after this command. * measurement prepare - receive optional configuration before each measurement. The pipeline must not stop streaming on update. * measurement start - Start of an individual measurement. Should be quasi instantaneous. E.g. a recorder should be already connected to the data stream and just start writing to disk. * measurement stop - Stop the measurement Pipelines can also implement: * populate_data_store to send data to the store. The address and port for a data store is received along the request. """ PIPELINE_STATES = [ "idle", "configuring", "configured", "capture_starting", "streaming", "ready", "measurement_preparing", "set", "measurement_starting", "measuring", "running", "measurement_stopping", "capture_stopping", "deconfiguring", "error", "panic", "unprovisioned", "provisioning", "deprovisioning" ] def __init__(self, ip, port, default_config={}): """ Initialize the pipeline. Subclasses are required to provide their default config dict and specify the data formats definied by the class, if any. Args: ip: ip to accept connections from port: port to listen default_config: default config of the pipeline """ self._state = "idle" self.previous_state = "unprovisioned" self._sensors = [] # inject data store dat into all default configs. default_config.setdefault("data_store", dict(ip="localhost", port=6379)) default_config.setdefault("id", "Unspecified") default_config.setdefault("type", self.__class__.__name__) default_config.setdefault("input_data_streams", []) default_config.setdefault("output_data_streams", []) default_config["ip"] = socket.gethostname() default_config["port"] = port for stream in value_list(default_config['input_data_streams']): stream.setdefault("source", "") if not stream.get('format'): log.warning("Input stream without format definition!") continue for key, value in EDDDataStore.data_formats[ stream['format']].items(): stream.setdefault(key, value) for stream in value_list(default_config['output_data_streams']): if not stream.get('format'): log.warning("Output stream without format definition!") continue for key, value in EDDDataStore.data_formats[ stream['format']].items(): stream.setdefault(key, value) self.__config = default_config.copy() self._default_config = default_config self._subprocesses = [] self._subprocessMonitor = None AsyncDeviceServer.__init__(self, ip, port) #update the docstrings for the requests by their subclass implementation for r, s in [(self.request_configure, self.configure), (self.request_set, self.set), (self.request_capture_start, self.capture_start), (self.request_capture_stop, self.capture_stop), (self.request_measurement_start, self.measurement_start), (self.request_measurement_stop, self.measurement_stop)]: r.__func__.__doc__ = s.__doc__ @property def _config(self): """ The current configuration of the pipeline, i.e. the default after all updates received via set and configure commands. This value should then be used in the _configure method. """ return self.__config @_config.setter def _config(self, value): if not isinstance(value, dict): raise RuntimeError("_config has to be a dict!") if value == self.__config: log.debug("No changes in config, not updating sensor") else: self.__config = value self._configUpdated() def _configUpdated(self): """ Signals that the config dict has been updated. Seperate method as direct updates of _config items without writing a full dict to _config will noy trigger the _config.setter and have to call this method manually. """ self._edd_config_sensor.set_value(json.dumps(self.__config, indent=4)) def setup_sensors(self): """ Setup monitoring sensors. The EDDPipeline base provides default sensors. Should be called by every subclass to ensure default sensors are available. """ self._pipeline_sensor_status = Sensor.discrete( "pipeline-status", description="Status of the pipeline", params=self.PIPELINE_STATES, default="idle", initial_status=Sensor.UNKNOWN) self.add_sensor(self._pipeline_sensor_status) self._edd_config_sensor = Sensor.string( "current-config", description="The current configuration for the EDD backend", default=json.dumps(self._config, indent=4), initial_status=Sensor.UNKNOWN) self.add_sensor(self._edd_config_sensor) self._log_level = Sensor.string("log-level", description="Log level", default=logging.getLevelName( log.level), initial_status=Sensor.NOMINAL) self.add_sensor(self._log_level) @request(Str(default='INFO')) @return_reply() def request_set_log_level(self, req, level): """ Sets the log level Return: katcp reply object [[[ !configure ok | (fail [error description]) ]]] """ @coroutine def wrapper(): try: log.info("Setting log level to: {}".format(level.upper())) logger = logging.getLogger('mpikat') logger.setLevel(level.upper()) self._log_level.set_value(level.upper()) log.debug("Successfully set log-level") except FailReply as fr: req.reply("fail", str(fr)) except Exception as error: log.exception(str(error)) req.reply("fail", str(error)) else: req.reply("ok") self.ioloop.add_callback(wrapper) raise AsyncReply @request() @return_reply() def request_whoami(self, req): """ Returns the name of the controlled pipeline Return: katcp reply object """ @coroutine def wrapper(): r = "\n Id: {}\n Classname: {}\n Type: {}\n".format( self._config['id'], self.__class__.__name__, self._config['type']) req.reply(r) self.ioloop.add_callback(wrapper) raise AsyncReply @request(Str(default="")) @return_reply() def request_log_level(self, req, *args): """ Overrides the katcp default log level to enable the katcp logger Return: katcp reply object """ @coroutine def wrapper(): r = "Command ignored, as this would enable the katcp internal logging! Use set-log-level to set the EDD log level or set-katcp-internal-log-level instead." req.reply(r) self.ioloop.add_callback(wrapper) raise AsyncReply # Raw access to req + msg needed, hence no decorator def request_set_katcp_internal_log_level(self, req, msg): """ Set the katcp internal log level Return: katcp reply object """ return AsyncDeviceServer.request_log_level(self, req, msg) @property def sensors(self): return self._sensors @property def state(self): """ State of the pipeline. """ return self._state @state.setter def state(self, value): log.info("Changing state: {} -> {}".format(self.previous_state, value)) self.previous_state = self._state self._state = value self._pipeline_sensor_status.set_value(self._state) @request(Str()) @return_reply() def request_override_state(self, req, value): """ Sets the state of the pipeline manually to a given value. Return: katcp reply object [[[ !configure ok | (fail [error description]) ]]] """ if value not in self.PIPELINE_STATES: log.warning( "Trying to overriding pipeline state but state '{}' does not exist" .format(value)) req.reply("fail", "State '{}' does not exist.".format(value)) else: log.warning("Overriding pipelien state: {}".format(value)) self.state = value req.reply("ok") raise AsyncReply def start(self): """ Start the server """ AsyncDeviceServer.start(self) def stop(self): """ Stop the server """ AsyncDeviceServer.stop(self) def _subprocess_error(self, proc): """ Sets the error state because proc has ended. """ log.error( "Error handle called because subprocess {} ended with return code {}" .format(proc.pid, proc.returncode)) self._subprocessMonitor.stop() self.state = "error" @request(Str()) @return_reply() def request_configure(self, req, config_json): """ Configure EDD to receive and process data Returns: katcp reply object [[[ !configure ok | (fail [error description]) ]]] """ @coroutine def configure_wrapper(): try: yield self.configure(config_json) except FailReply as fr: req.reply("fail", str(fr)) except Exception as error: log.exception(str(error)) req.reply("fail", str(error)) else: req.reply("ok") self.ioloop.add_callback(configure_wrapper) raise AsyncReply @coroutine def configure(self, config_json=""): """ Default method for configuration. """ log.info("Running configure.") pass @request() @return_reply() def request_set_default_config(self, req): """ (Re-)set the config to the default. Returns: katcp reply object [[[ !reconfigure ok | (fail [error description]) ]]] """ logging.info("Setting default configuration") self._config = self._default_config.copy() req.reply("ok") raise AsyncReply @request(Str()) @return_reply() def request_set(self, req, config_json): """ Add the config_json to the current config Returns: katcp reply object [[[ !configure ok | (fail [error description]) ]]] """ @coroutine def wrapper(): try: yield self.set(config_json) except FailReply as fr: log.error(str(fr)) req.reply("fail", str(fr)) except Exception as error: log.exception(str(error)) req.reply("fail", str(error)) else: req.reply("ok") self.ioloop.add_callback(wrapper) raise AsyncReply @coroutine def _cfgjson2dict(self, config_json): """ Returns the provided config as dict if a json object or returns the object if it already is a dict. """ if isinstance(config_json, str) or isinstance(config_json, unicode_type): log.debug("Received config as string:\n {}".format(config_json)) if (not config_json.strip()) or config_json.strip() == '""': log.debug("String empty, returning empty dict.") raise Return({}) try: cfg = json.loads(config_json) except: log.error("Error parsing json") raise FailReply( "Cannot handle config string {} - Not valid json!".format( config_json)) elif isinstance(config_json, dict): log.debug("Received config as dict") cfg = config_json else: raise FailReply( "Cannot handle config type {}. Config has to bei either json formatted string or dict!" .format(type(config_json))) log.debug("Got cfg: {}, {}".format(cfg, type(cfg))) raise Return(cfg) @coroutine def set(self, config_json): """ Add the config_json to the current config. Input / output data streams will be filled with default values if not provided. The configuration will be rejected if no corresponding value is present in the default config. A warnign is emitted on type changes. The final configuration is stored in self._config for access in derived classes. Returns: katcp reply object [[[ !configure ok | (fail [error description]) ]]] """ log.debug("Updating configuration: '{}'".format(config_json)) cfg = yield self._cfgjson2dict(config_json) try: newcfg = updateConfig(self._config, cfg) # yield self.check_config(newcfg) self._config = newcfg log.debug("Updated config: '{}'".format(self._config)) except FailReply as E: log.error("Check config failed!") raise E except KeyError as error: raise FailReply("Unknown configuration option: {}".format( str(error))) except Exception as error: raise FailReply("Unknown ERROR: {}".format(str(error))) # @coroutine # def check_config(self, cfg): # """ # Checks a config dictionary for validity. to be implemented in child class. Raise FailReply on invalid setting. # """ # pass @request() @return_reply() def request_capture_start(self, req): """ Start the EDD backend processing This is the KATCP wrapper for the capture_start command Returns: katcp reply object [[[ !capture_start ok | (fail [error description]) ]]] """ @coroutine def start_wrapper(): try: yield self.capture_start() except FailReply as fr: log.error(str(fr)) req.reply("fail", str(fr)) except Exception as error: log.exception(str(error)) req.reply("fail", str(error)) else: req.reply("ok") self.ioloop.add_callback(start_wrapper) raise AsyncReply @coroutine def request_halt(self, req, msg): """ Halts the process. Reimplemnetation of base class halt without timeout as this crash """ if self.state == "running": yield self.capture_stop() yield self.deconfigure() self.ioloop.stop() req.reply("Server has stopepd - ByeBye!") raise AsyncReply def watchdog_error(self): """ Set error mode requested by watchdog. """ log.error("Error state requested by watchdog!") self.state = "error" @coroutine def capture_start(self): """ Default method - no effect """ pass @request() @return_reply() def request_capture_stop(self, req): """ Stop the EDD backend processing This is the KATCP wrapper for the capture_stop command Return: katcp reply object [[[ !capture_stop ok | (fail [error description]) ]]] """ @coroutine def stop_wrapper(): try: yield self.capture_stop() except FailReply as fr: log.error(str(fr)) req.reply("fail", str(fr)) except Exception as error: log.exception(str(error)) req.reply("fail", str(error)) else: req.reply("ok") self.ioloop.add_callback(stop_wrapper) raise AsyncReply @coroutine def capture_stop(self): """Default method - no effect""" pass @request(Str()) @return_reply() def request_measurement_prepare(self, req, config_json): """ Prepare measurement request Return: katcp reply object [[[ !measurement_prepare ok | (fail [error description]) ]]] """ @coroutine def wrapper(): try: yield self.measurement_prepare(config_json) except FailReply as fr: log.error(str(fr)) req.reply("fail", str(fr)) except Exception as error: log.exception(str(error)) req.reply("fail", str(error)) else: req.reply("ok") self.ioloop.add_callback(wrapper) raise AsyncReply @coroutine def measurement_prepare(self, config_json=""): """Default method - no effect""" pass @request() @return_reply() def request_measurement_start(self, req): """ Start emasurement. This is the KATCP wrapper for the measurement_start command Return: katcp reply object [[[ !measurement_start ok | (fail [error description]) ]]] """ @coroutine def wrapper(): try: yield self.measurement_start() except FailReply as fr: log.error(str(fr)) req.reply("fail", str(fr)) except Exception as error: log.exception(str(error)) req.reply("fail", str(error)) else: req.reply("ok") self.ioloop.add_callback(wrapper) raise AsyncReply @coroutine def measurement_start(self): """Default method - no effect""" pass @request() @return_reply() def request_measurement_stop(self, req): """ Stop measurement This is the KATCP wrapper for the measurement_stop command Return: katcp reply object [[[ !measurement_start ok | (fail [error description]) ]]] """ @coroutine def wrapper(): try: yield self.measurement_stop() except FailReply as fr: log.error(str(fr)) req.reply("fail", str(fr)) except Exception as error: log.exception(str(error)) req.reply("fail", str(error)) else: req.reply("ok") self.ioloop.add_callback(wrapper) raise AsyncReply @coroutine def measurement_stop(self): """Default method - no effect""" pass @request() @return_reply() def request_deconfigure(self, req): """ Deconfigure the pipeline. This is the KATCP wrapper for the deconfigure command Return: katcp reply object [[[ !deconfigure ok | (fail [error description]) ]]] """ @coroutine def deconfigure_wrapper(): try: yield self.deconfigure() except Exception as error: log.exception(str(error)) req.reply("fail", str(error)) else: req.reply("ok") self.ioloop.add_callback(deconfigure_wrapper) raise AsyncReply @coroutine def deconfigure(self): """Default method - no effect""" pass @request(include_msg=True) @return_reply() def request_register(self, req, msg): """ Register the pipeline in the datastore. Optionally the data store can be specified as "ip:port". If not specified the value in the configuration will be used. katcp reply object [[[ !configure ok | (fail [error description]) ]]] """ log.debug("regsiter request") @coroutine def wrapper(): try: if msg.arguments: host, port = msg.argument.split(':') port = int(port) else: host = self._config['data_store']['ip'] port = self._config['data_store']['port'] yield self.register(host, port) except FailReply as fr: log.error(str(fr)) req.reply("fail", str(fr)) except Exception as error: log.exception(str(error)) req.reply("fail", str(error)) else: req.reply("ok") self.ioloop.add_callback(wrapper) raise AsyncReply @coroutine def register(self, host=None, port=None): """ Registers the pipeline in the data store. Args: host, port: Ip and port of the data store. If no host and port ar eprovided, values from the internal config are used. """ if host == None: log.debug("No host provided. Use value from current config.") host = self.config["data_store"]["ip"] if port == None: log.debug("No portprovided. Use value from current config.") port = self.config["data_store"]["port"] log.debug("Register pipeline in data store @ {}:{}".format(host, port)) dataStore = EDDDataStore.EDDDataStore(host, port) dataStore.updateProduct(self._config)
class FbfWorkerServer(AsyncDeviceServer): VERSION_INFO = ("fbf-control-server-api", 0, 1) BUILD_INFO = ("fbf-control-server-implementation", 0, 1, "rc1") DEVICE_STATUSES = ["ok", "degraded", "fail"] STATES = ["idle", "preparing", "ready", "starting", "capturing", "stopping", "error"] IDLE, PREPARING, READY, STARTING, CAPTURING, STOPPING, ERROR = STATES def __init__(self, ip, port, dummy=False): """ @brief Construct new FbfWorkerServer instance @params ip The interface address on which the server should listen @params port The port that the server should bind to @params de_ip The IP address of the delay engine server @params de_port The port number for the delay engine server """ self._dc_ip = None self._dc_port = None self._delay_client = None self._delay_client = None self._delays = None self._dummy = dummy self._dada_input_key = 0xdada self._dada_coh_output_key = 0xcaca self._dada_incoh_output_key = 0xbaba super(FbfWorkerServer, self).__init__(ip,port) @coroutine def start(self): """Start FbfWorkerServer server""" super(FbfWorkerServer,self).start() @coroutine def stop(self): yield self.deregister() yield super(FbfWorkerServer,self).stop() def setup_sensors(self): """ @brief Set up monitoring sensors. Sensor list: - device-status - local-time-synced - fbf0-status - fbf1-status @note The following sensors are made available on top of default sensors implemented in AsynDeviceServer and its base classes. device-status: Reports the health status of the FBFUSE and associated devices: Among other things report HW failure, SW failure and observation failure. """ self._device_status_sensor = Sensor.discrete( "device-status", description = "Health status of FbfWorkerServer instance", params = self.DEVICE_STATUSES, default = "ok", initial_status = Sensor.NOMINAL) self.add_sensor(self._device_status_sensor) self._state_sensor = LoggingSensor.discrete( "state", params = self.STATES, description = "The current state of this worker instance", default = self.IDLE, initial_status = Sensor.NOMINAL) self._state_sensor.set_logger(log) self.add_sensor(self._state_sensor) self._delay_client_sensor = Sensor.string( "delay-engine-server", description = "The address of the currently set delay engine", default = "", initial_status = Sensor.UNKNOWN) self.add_sensor(self._delay_client_sensor) self._antenna_capture_order_sensor = Sensor.string( "antenna-capture-order", description = "The order in which the worker will capture antennas internally", default = "", initial_status = Sensor.UNKNOWN) self.add_sensor(self._antenna_capture_order_sensor) self._mkrecv_header_sensor = Sensor.string( "mkrecv-header", description = "The MKRECV/DADA header used for configuring capture with MKRECV", default = "", initial_status = Sensor.UNKNOWN) self.add_sensor(self._mkrecv_header_sensor) @property def capturing(self): return self.state == self.CAPTURING @property def idle(self): return self.state == self.IDLE @property def starting(self): return self.state == self.STARTING @property def stopping(self): return self.state == self.STOPPING @property def ready(self): return self.state == self.READY @property def preparing(self): return self.state == self.PREPARING @property def error(self): return self.state == self.ERROR @property def state(self): return self._state_sensor.value() def _system_call_wrapper(self, cmd): log.debug("System call: '{}'".format(" ".join(cmd))) if self._dummy: log.debug("Server is running in dummy mode, system call will be ignored") else: check_call(cmd) def _determine_feng_capture_order(self, antenna_to_feng_id_map, coherent_beam_config, incoherent_beam_config): # Need to sort the f-engine IDs into 4 states # 1. Incoherent but not coherent # 2. Incoherent and coherent # 3. Coherent but not incoherent # 4. Neither coherent nor incoherent # # We must catch all antennas as even in case 4 the data is required for the # transient buffer. # # To make this split, we first create the three sets, coherent, incoherent and all. mapping = antenna_to_feng_id_map all_feng_ids = set(mapping.values()) coherent_feng_ids = set(mapping[antenna] for antenna in parse_csv_antennas(coherent_beam_config['antennas'])) incoherent_feng_ids = set(mapping[antenna] for antenna in parse_csv_antennas(incoherent_beam_config['antennas'])) incoh_not_coh = incoherent_feng_ids.difference(coherent_feng_ids) incoh_and_coh = incoherent_feng_ids.intersection(coherent_feng_ids) coh_not_incoh = coherent_feng_ids.difference(incoherent_feng_ids) used_fengs = incoh_not_coh.union(incoh_and_coh).union(coh_not_incoh) unused_fengs = all_feng_ids.difference(used_fengs) # Output final order final_order = list(incoh_not_coh) + list(incoh_and_coh) + list(coh_not_incoh) + list(unused_fengs) start_of_incoherent_fengs = 0 end_of_incoherent_fengs = len(incoh_not_coh) + len(incoh_and_coh) start_of_coherent_fengs = len(incoh_not_coh) end_of_coherent_fengs = len(incoh_not_coh) + len(incoh_and_coh) + len(coh_not_incoh) start_of_unused_fengs = end_of_coherent_fengs end_of_unused_fengs = len(all_feng_ids) info = { "order": final_order, "incoherent_span":(start_of_incoherent_fengs, end_of_incoherent_fengs), "coherent_span":(start_of_coherent_fengs, end_of_coherent_fengs), "unused_span":(start_of_unused_fengs, end_of_unused_fengs) } return info @request(Str(), Int(), Int(), Float(), Float(), Str(), Str(), Str(), Str(), Str(), Int()) @return_reply() def request_prepare(self, req, feng_groups, nchans_per_group, chan0_idx, chan0_freq, chan_bw, mcast_to_beam_map, feng_config, coherent_beam_config, incoherent_beam_config, dc_ip, dc_port): """ @brief Prepare FBFUSE to receive and process data from a subarray @detail REQUEST ?configure feng_groups, nchans_per_group, chan0_idx, chan0_freq, chan_bw, mcast_to_beam_map, antenna_to_feng_id_map, coherent_beam_config, incoherent_beam_config Configure FBFUSE for the particular data products @param req A katcp request object @param feng_groups The contiguous range of multicast groups to capture F-engine data from, the parameter is formatted in stream notation, e.g.: spead://239.11.1.150+3:7147 @param nchans_per_group The number of frequency channels per multicast group @param chan0_idx The index of the first channel in the set of multicast groups @param chan0_freq The frequency in Hz of the first channel in the set of multicast groups @param chan_bw The channel bandwidth in Hz @param mcast_to_beam_map A JSON mapping between output multicast addresses and beam IDs. This is the sole authority for the number of beams that will be produced and their indexes. The map is in the form: @code { "spead://239.11.2.150:7147":"cfbf00001,cfbf00002,cfbf00003,cfbf00004", "spead://239.11.2.151:7147":"ifbf00001" } @param feng_config JSON dictionary containing general F-engine parameters. @code { 'bandwidth': 856e6, 'centre-frequency': 1200e6, 'sideband': 'upper', 'feng-antenna-map': {...}, 'sync-epoch': 12353524243.0, 'nchans': 4096 } @param coherent_beam_config A JSON object specifying the coherent beam configuration in the form: @code { 'tscrunch':16, 'fscrunch':1, 'antennas':'m007,m008,m009' } @endcode @param incoherent_beam_config A JSON object specifying the incoherent beam configuration in the form: @code { 'tscrunch':16, 'fscrunch':1, 'antennas':'m007,m008,m009' } @endcode @return katcp reply object [[[ !configure ok | (fail [error description]) ]]] """ if not self.idle: return ("fail", "FBF worker not in IDLE state") log.info("Preparing worker server instance") try: feng_config = json.loads(feng_config) except Exception as error: return ("fail", "Unable to parse F-eng config with error: {}".format(str(error))) try: mcast_to_beam_map = json.loads(mcast_to_beam_map) except Exception as error: return ("fail", "Unable to parse multicast beam mapping with error: {}".format(str(error))) try: coherent_beam_config = json.loads(coherent_beam_config) except Exception as error: return ("fail", "Unable to parse coherent beam config with error: {}".format(str(error))) try: incoherent_beam_config = json.loads(incoherent_beam_config) except Exception as error: return ("fail", "Unable to parse incoherent beam config with error: {}".format(str(error))) @coroutine def configure(): self._state_sensor.set_value(self.PREPARING) log.debug("Starting delay configuration server client") self._delay_client = KATCPClientResource(dict( name="delay-configuration-client", address=(dc_ip, dc_port), controlled=True)) self._delay_client.start() log.debug("Determining F-engine capture order") feng_capture_order_info = self._determine_feng_capture_order(feng_config['feng-antenna-map'], coherent_beam_config, incoherent_beam_config) log.debug("Capture order info: {}".format(feng_capture_order_info)) feng_to_antenna_map = {value:key for key,value in feng_config['feng-antenna-map'].items()} antenna_capture_order_csv = ",".join([feng_to_antenna_map[feng_id] for feng_id in feng_capture_order_info['order']]) self._antenna_capture_order_sensor.set_value(antenna_capture_order_csv) log.debug("Parsing F-engines to capture: {}".format(feng_groups)) capture_range = ip_range_from_stream(feng_groups) ngroups = capture_range.count partition_nchans = nchans_per_group * ngroups partition_bandwidth = partition_nchans * chan_bw npol = 2 ndim = 2 nbits = 8 tsamp = 1.0 / (feng_config['bandwidth'] / feng_config['nchans']) sample_clock = feng_config['bandwidth'] * 2 timestamp_step = feng_config['nchans'] * 2 * 256 # WARNING: This is only valid in 4k mode frequency_ids = [chan0_idx+nchans_per_group*ii for ii in range(ngroups)] #WARNING: Assumes contigous groups mkrecv_config = { 'frequency_mhz': (chan0_freq + feng_config['nchans']/2.0 * chan_bw) / 1e6, 'bandwidth': partition_bandwidth, 'tsamp_us': tsamp * 1e6, 'bytes_per_second': partition_bandwidth * npol * ndim * nbits, 'nchan': partition_nchans, 'dada_key': self._dada_input_key, 'nantennas': len(feng_capture_order_info['order']), 'antennas_csv': antenna_capture_order_csv, 'sync_epoch': feng_config['sync-epoch'], 'sample_clock': sample_clock, 'mcast_sources': ",".join([str(group) for group in capture_range]), 'mcast_port': capture_range.port, 'interface': "192.168.0.1", 'timestamp_step': timestamp_step, 'ordered_feng_ids_csv': ",".join(map(str, feng_capture_order_info['order'])), 'frequency_partition_ids_csv': ",".join(map(str,frequency_ids)) } mkrecv_header = make_mkrecv_header(mkrecv_config) self._mkrecv_header_sensor.set_value(mkrecv_header) log.info("Determined MKRECV configuration:\n{}".format(mkrecv_header)) log.debug("Parsing beam to multicast mapping") incoherent_beam = None incoherent_beam_group = None coherent_beam_to_group_map = {} for group, beams in mcast_to_beam_map.items(): for beam in beams.split(","): if beam.startswith("cfbf"): coherent_beam_to_group_map[beam] = group if beam.startswith("ifbf"): incoherent_beam = beam incoherent_beam_group = group log.debug("Determined coherent beam to multicast mapping: {}".format(coherent_beam_to_group_map)) if incoherent_beam: log.debug("Incoherent beam will be sent to: {}".format(incoherent_beam_group)) else: log.debug("No incoherent beam specified") """ Tasks: - compile kernels - create shared memory banks """ # Compile beamformer # TBD # Need to come up with a good way to allocate keys for dada buffers # Create input DADA buffer log.debug("Creating dada buffer for input with key '{}'".format("%x"%self._dada_input_key)) #self._system_call_wrapper(["dada_db","-k",self._dada_input_key,"-n","64","-l","-p"]) # Create coherent beam output DADA buffer log.debug("Creating dada buffer for coherent beam output with key '{}'".format("%x"%self._dada_coh_output_key)) #self._system_call_wrapper(["dada_db","-k",self._dada_coh_output_key,"-n","64","-l","-p"]) # Create incoherent beam output DADA buffer log.debug("Creating dada buffer for incoherent beam output with key '{}'".format("%x"%self._dada_incoh_output_key)) #self._system_call_wrapper(["dada_db","-k",self._dada_incoh_output_key,"-n","64","-l","-p"]) # Create SPEAD transmitter for coherent beams # Call to MKSEND # Create SPEAD transmitter for incoherent beam # Call to MKSEND # Need to pass the delay buffer controller the F-engine capture order but only for the coherent beams cstart, cend = feng_capture_order_info['coherent_span'] coherent_beam_feng_capture_order = feng_capture_order_info['order'][cstart:cend] coherent_beam_antenna_capture_order = [feng_to_antenna_map[idx] for idx in coherent_beam_feng_capture_order] # Start DelayBufferController instance # Here we are going to make the assumption that the server and processing all run in # one docker container that will be preallocated with the right CPU set, GPUs, memory # etc. This means that the configurations need to be unique by NUMA node... [Note: no # they don't, we can use the container IPC channel which isolates the IPC namespaces.] if not self._dummy: n_coherent_beams = len(coherent_beam_to_group_map) coherent_beam_antennas = parse_csv_antennas(coherent_beam_config['antennas']) self._delay_buffer_controller = DelayBufferController(self._delay_client, coherent_beam_to_group_map.keys(), coherent_beam_antenna_capture_order, 1) yield self._delay_buffer_controller.start() # Start beamformer instance # TBD # Define MKRECV configuration file # SPEAD receiver does not get started until a capture init call self._state_sensor.set_value(self.READY) req.reply("ok",) self.ioloop.add_callback(configure) raise AsyncReply @request(Str()) @return_reply() def request_deconfigure(self, req): """ @brief Deconfigure the FBFUSE instance. @note Deconfigure the FBFUSE instance. If FBFUSE uses katportalclient to get information from CAM, then it should disconnect at this time. @param req A katcp request object @return katcp reply object [[[ !deconfigure ok | (fail [error description]) ]]] """ # Need to make sure everything is stopped # Call self.stop? # Need to delete all allocated DADA buffers: @coroutine def deconfigure(): log.info("Destroying dada buffer for input with key '{}'".format(self._dada_input_key)) self._system_call_wrapper(["dada_db","-k",self._dada_input_key,"-d"]) log.info("Destroying dada buffer for coherent beam output with key '{}'".format(self._dada_coh_output_key)) self._system_call_wrapper(["dada_db","-k",self._dada_coh_output_key,"-n","64","-l","-p"]) log.info("Destroying dada buffer for incoherent beam output with key '{}'".format(self._dada_incoh_output_key)) self._system_call_wrapper(["dada_db","-k",self._dada_coh_output_key,"-n","64","-l","-p"]) log.info("Destroying delay buffer controller") del self._delay_buffer_controller self._delay_buffer_controller = None req.reply("ok",) self.ioloop.add_callback(deconfigure) raise AsyncReply @request(Str()) @return_reply() def request_capture_start(self, req): """ @brief Prepare FBFUSE ingest process for data capture. @note A successful return value indicates that FBFUSE is ready for data capture and has sufficient resources available. An error will indicate that FBFUSE is not in a position to accept data @param req A katcp request object @return katcp reply object [[[ !capture-init ok | (fail [error description]) ]]] """ if not self.ready: return ("fail", "FBF worker not in READY state") # Here we start MKRECV running into the input dada buffer self._mkrecv_ingest_proc = Popen(["mkrecv","--config",self._mkrecv_config_filename], stdout=PIPE, stderr=PIPE) return ("ok",) @request(Str()) @return_reply() def request_capture_stop(self, req): """ @brief Terminate the FBFUSE ingest process for the particular FBFUSE instance @note This writes out any remaining metadata, closes all files, terminates any remaining processes and frees resources for the next data capture. @param req A katcp request object @param product_id This is a name for the data product, used to track which subarray is being told to stop capture. For example "array_1_bc856M4k". @return katcp reply object [[[ !capture-done ok | (fail [error description]) ]]] """ if not self.capturing and not self.error: return ("ok",) @coroutine def stop_mkrecv_capture(): #send SIGTERM to MKRECV log.info("Sending SIGTERM to MKRECV process") self._mkrecv_ingest_proc.terminate() self._mkrecv_timeout = 10.0 log.info("Waiting {} seconds for MKRECV to terminate...".format(self._mkrecv_timeout)) now = time.time() while time.time()-now < self._mkrecv_timeout: retval = self._mkrecv_ingest_proc.poll() if retval is not None: log.info("MKRECV returned a return value of {}".format(retval)) break else: yield sleep(0.5) else: log.warning("MKRECV failed to terminate in alloted time") log.info("Killing MKRECV process") self._mkrecv_ingest_proc.kill() req.reply("ok",) self.ioloop.add_callback(self.stop_mkrecv_capture) raise AsyncReply
class MyServer(DeviceServer): VERSION_INFO = ("example-api", 1, 0) BUILD_INFO = ("example-implementation", 0, 1, "") # Optionally set the KATCP protocol version and features. Defaults to # the latest implemented version of KATCP, with all supported optional # features PROTOCOL_INFO = ProtocolFlags(5, 0, set([ ProtocolFlags.MULTI_CLIENT, ProtocolFlags.MESSAGE_IDS, ])) FRUIT = [ "apple", "banana", "pear", "kiwi", ] def setup_sensors(self): """Setup some server sensors.""" self._add_result = Sensor.float("add.result", "Last ?add result.", "", [-10000, 10000]) self._add_result.set_value(0, Sensor.UNREACHABLE) self._time_result = Sensor.timestamp("time.result", "Last ?time result.", "") self._time_result.set_value(0, Sensor.INACTIVE) self._eval_result = Sensor.string("eval.result", "Last ?eval result.", "") self._eval_result.set_value('', Sensor.UNKNOWN) self._fruit_result = Sensor.discrete("fruit.result", "Last ?pick-fruit result.", "", self.FRUIT) self._fruit_result.set_value('apple', Sensor.ERROR) self.add_sensor(self._add_result) self.add_sensor(self._time_result) self.add_sensor(self._eval_result) self.add_sensor(self._fruit_result) @request(Float(), Float()) @return_reply(Float()) def request_add(self, req, x, y): """Add two numbers""" r = x + y self._add_result.set_value(r) return ("ok", r) @request() @return_reply(Timestamp()) def request_time(self, req): """Return the current time in ms since the Unix Epoch.""" r = time.time() self._time_result.set_value(r) return ("ok", r) @request(Str()) @return_reply(Str()) def request_eval(self, req, expression): """Evaluate a Python expression.""" r = str(eval(expression)) self._eval_result.set_value(r) return ("ok", r) @request() @return_reply(Discrete(FRUIT)) def request_pick_fruit(self, req): """Pick a random fruit.""" r = random.choice(self.FRUIT + [None]) if r is None: return ("fail", "No fruit.") delay = random.randrange(1,5) req.inform("Picking will take %d seconds" % delay) def pick_handler(): self._fruit_result.set_value(r) req.reply("ok", r) handle_timer = threading.Timer(delay, pick_handler) handle_timer.start() raise AsyncReply @request(Str()) @return_reply() def request_set_sensor_inactive(self, req, sensor_name): """Set sensor status to inactive""" sensor = self.get_sensor(sensor_name) ts, status, value = sensor.read() sensor.set_value(value, sensor.INACTIVE, ts) return('ok',) @request(Str()) @return_reply() def request_set_sensor_unreachable(self, req, sensor_name): """Set sensor status to unreachable""" sensor = self.get_sensor(sensor_name) ts, status, value = sensor.read() sensor.set_value(value, sensor.UNREACHABLE, ts) return('ok',) def request_raw_reverse(self, req, msg): """ A raw request handler to demonstrate the calling convention if @request decoraters are not used. Reverses the message arguments. """ # msg is a katcp.Message.request object reversed_args = msg.arguments[::-1] # req.make_reply() makes a katcp.Message.reply using the correct request # name and message ID return req.make_reply(*reversed_args)
class JsonStatusServer(AsyncDeviceServer): VERSION_INFO = ("reynard-eff-jsonstatusserver-api", 0, 1) BUILD_INFO = ("reynard-eff-jsonstatusserver-implementation", 0, 1, "rc1") def __init__(self, server_host, server_port, mcast_group=JSON_STATUS_MCAST_GROUP, mcast_port=JSON_STATUS_PORT, parser=EFF_JSON_CONFIG, dummy=False): self._mcast_group = mcast_group self._mcast_port = mcast_port self._parser = parser self._dummy = dummy if not dummy: self._catcher_thread = StatusCatcherThread() else: self._catcher_thread = None self._monitor = None self._updaters = {} self._controlled = set() super(JsonStatusServer, self).__init__(server_host, server_port) @coroutine def _update_sensors(self): log.debug("Updating sensor values") data = self._catcher_thread.data if data is None: log.warning("Catcher thread has not received any data yet") return for name, params in self._parser.items(): if name in self._controlled: continue if "updater" in params: self._sensors[name].set_value(params["updater"](data)) def start(self): """start the server""" super(JsonStatusServer, self).start() if not self._dummy: self._catcher_thread.start() self._monitor = PeriodicCallback(self._update_sensors, 1000, io_loop=self.ioloop) self._monitor.start() def stop(self): """stop the server""" if not self._dummy: if self._monitor: self._monitor.stop() self._catcher_thread.stop() return super(JsonStatusServer, self).stop() @request() @return_reply(Str()) def request_xml(self, req): """request an XML version of the status message""" def make_elem(parent, name, text): child = etree.Element(name) child.text = text parent.append(child) @coroutine def convert(): try: root = etree.Element("TelescopeStatus", attrib={"timestamp": str(time.time())}) for name, sensor in self._sensors.items(): child = etree.Element("TelStat") make_elem(child, "Name", name) make_elem(child, "Value", str(sensor.value())) make_elem(child, "Status", str(sensor.status())) make_elem(child, "Type", self._parser[name]["type"]) if "units" in self._parser[name]: make_elem(child, "Units", self._parser[name]["units"]) root.append(child) except Exception as error: req.reply("ok", str(error)) else: req.reply("ok", etree.tostring(root)) self.ioloop.add_callback(convert) raise AsyncReply @request() @return_reply(Str()) def request_json(self, req): """request an JSON version of the status message""" return ("ok", self.as_json()) def as_json(self): """Convert status sensors to JSON object""" out = {} for name, sensor in self._sensors.items(): out[name] = str(sensor.value()) return json.dumps(out) @request(Str()) @return_reply(Str()) def request_sensor_control(self, req, name): """take control of a given sensor value""" if name not in self._sensors: return ("fail", "No sensor named '{0}'".format(name)) else: self._controlled.add(name) return ("ok", "{0} under user control".format(name)) @request() @return_reply(Str()) def request_sensor_control_all(self, req): """take control of all sensors value""" for name, sensor in self._sensors.items(): self._controlled.add(name) return ("ok", "{0} sensors under user control".format(len(self._controlled))) @request() @return_reply(Int()) def request_sensor_list_controlled(self, req): """List all controlled sensors""" count = len(self._controlled) for name in list(self._controlled): req.inform("{0} -- {1}".format(name, self._sensors[name].value())) return ("ok", count) @request(Str()) @return_reply(Str()) def request_sensor_release(self, req, name): """release a sensor from user control""" if name not in self._sensors: return ("fail", "No sensor named '{0}'".format(name)) else: self._controlled.remove(name) return ("ok", "{0} released from user control".format(name)) @request() @return_reply(Str()) def request_sensor_release_all(self, req): """take control of all sensors value""" self._controlled = set() return ("ok", "All sensors released") @request(Str(), Str()) @return_reply(Str()) def request_sensor_set(self, req, name, value): """Set the value of a sensor""" if name not in self._sensors: return ("fail", "No sensor named '{0}'".format(name)) if name not in self._controlled: return ("fail", "Sensor '{0}' not under user control".format(name)) try: param = self._parser[name] value = TYPE_CONVERTER[param["type"]](value) self._sensors[name].set_value(value) except Exception as error: return ("fail", str(error)) else: return ("ok", "{0} set to {1}".format(name, self._sensors[name].value())) def setup_sensors(self): """Set up basic monitoring sensors. """ for name, params in self._parser.items(): if params["type"] == "float": sensor = Sensor.float(name, description=params["description"], unit=params.get("units", None), default=params.get("default", 0.0), initial_status=Sensor.UNKNOWN) elif params["type"] == "string": sensor = Sensor.string(name, description=params["description"], default=params.get("default", ""), initial_status=Sensor.UNKNOWN) elif params["type"] == "int": sensor = Sensor.integer(name, description=params["description"], default=params.get("default", 0), unit=params.get("units", None), initial_status=Sensor.UNKNOWN) elif params["type"] == "bool": sensor = Sensor.boolean(name, description=params["description"], default=params.get("default", False), initial_status=Sensor.UNKNOWN) else: raise Exception("Unknown sensor type '{0}' requested".format( params["type"])) self.add_sensor(sensor)
class ApsMasterController(MasterController): """This is the main KATCP interface for the APSUSE pulsar searching system on MeerKAT. This controller only holds responsibility for capture of data from the CBF network and writing of that data to disk. This interface satisfies the following ICDs: CAM-APSUSE: <link> """ VERSION_INFO = ("mpikat-aps-api", 0, 1) BUILD_INFO = ("mpikat-aps-implementation", 0, 1, "rc1") DEVICE_STATUSES = ["ok", "degraded", "fail"] def __init__(self, ip, port, dummy=False): """ @brief Construct new ApsMasterController instance @params ip The IP address on which the server should listen @params port The port that the server should bind to """ super(ApsMasterController, self).__init__(ip, port, ApsWorkerPool()) self._katportal_wrapper_type = FbfKatportalMonitor self._dummy = dummy if self._dummy: for ii in range(8): self._server_pool.add("127.0.0.1", 50000 + ii) @request(Str(), Str(), Str()) @return_reply() def request_configure(self, req, product_id, streams_json, proxy_name): """ @brief Configure APSUSE to receive and process data from a subarray @detail REQUEST ?configure product_id antennas_csv n_channels streams_json proxy_name Configure APSUSE for the particular data products @param req A katcp request object @param product_id This is a name for the data product, which is a useful tag to include in the data, but should not be analysed further. For example "array_1_bc856M4k". @param streams_json a JSON struct containing config keys and values describing the streams. For example: @code {'stream_type1': { 'stream_name1': 'stream_address1', 'stream_name2': 'stream_address2', ...}, 'stream_type2': { 'stream_name1': 'stream_address1', 'stream_name2': 'stream_address2', ...}, ...} @endcode The steam type keys indicate the source of the data and the type, e.g. cam.http. stream_address will be a URI. For SPEAD streams, the format will be spead://<ip>[+<count>]:<port>, representing SPEAD stream multicast groups. When a single logical stream requires too much bandwidth to accommodate as a single multicast group, the count parameter indicates the number of additional consecutively numbered multicast group ip addresses, and sharing the same UDP port number. stream_name is the name used to identify the stream in CAM. A Python example is shown below, for five streams: One CAM stream, with type cam.http. The camdata stream provides the connection string for katportalclient (for the subarray that this APSUSE instance is being configured on). One F-engine stream, with type: cbf.antenna_channelised_voltage. One X-engine stream, with type: cbf.baseline_correlation_products. Two beam streams, with type: cbf.tied_array_channelised_voltage. The stream names ending in x are horizontally polarised, and those ending in y are vertically polarised. @code pprint(streams_dict) {'cam.http': {'camdata':'http://10.8.67.235/api/client/1'}, 'cbf.antenna_channelised_voltage': {'i0.antenna-channelised-voltage':'spead://239.2.1.150+15:7148'}, ...} @endcode If using katportalclient to get information from CAM, then reconnect and re-subscribe to all sensors of interest at this time. @param proxy_name The CAM name for the instance of the APSUSE data proxy that is being configured. For example, "APSUSE_3". This can be used to query sensors on the correct proxy, in the event that there are multiple instances in the same subarray. @note A configure call will result in the generation of a new subarray instance in APSUSE that will be added to the clients list. @return katcp reply object [[[ !configure ok | (fail [error description]) ]]] """ msg = ("Configuring new APSUSE product", "Product ID: {}".format(product_id), "Streams: {}".format(streams_json), "Proxy name: {}".format(proxy_name)) log.info("\n".join(msg)) # Test if product_id already exists if product_id in self._products: return ("fail", "APS already has a configured product with ID: {}".format( product_id)) # Determine number of nodes required based on number of antennas in subarray # Note this is a poor way of handling this that may be updated later. In theory # there is a throughput measure as a function of bandwidth, polarisations and number # of antennas that allows one to determine the number of nodes to run. Currently we # just assume one antennas worth of data per NIC on our servers, so two antennas per # node. streams = json.loads(streams_json) try: streams['cam.http']['camdata'] except KeyError as error: return ( "fail", "JSON streams object does not contain required key: {}".format( str(error))) @coroutine def configure(): fbf_monitor = self._katportal_wrapper_type( streams['cam.http']['camdata'], product_id) self._products[product_id] = ApsProductController( self, product_id, fbf_monitor, proxy_name) self._update_products_sensor() log.debug( "Configured APSUSE instance with ID: {}".format(product_id)) req.reply("ok", ) self.ioloop.add_callback(configure) raise AsyncReply @request(Str()) @return_reply() def request_deconfigure(self, req, product_id): """ @brief Deconfigure the APSUSE instance. @note Deconfigure the APSUSE instance. If APSUSE uses katportalclient to get information from CAM, then it should disconnect at this time. @param req A katcp request object @param product_id This is a name for the data product, used to track which subarray is being deconfigured. For example "array_1_bc856M4k". @return katcp reply object [[[ !deconfigure ok | (fail [error description]) ]]] """ log.info( "Deconfiguring APSUSE instace with ID '{}'".format(product_id)) # Test if product exists try: product = self._get_product(product_id) except ProductLookupError as error: return ("fail", str(error)) try: product.deconfigure() except Exception as error: return ("fail", str(error)) del self._products[product_id] self._update_products_sensor() return ("ok", ) @request(Str(), Str()) @return_reply() @coroutine def request_target_start(self, req, product_id, target): """ @brief Notify APSUSE that a new target is being observed @param product_id This is a name for the data product, used to track which subarray is being deconfigured. For example "array_1_bc856M4k". @param target A KATPOINT target string @return katcp reply object [[[ !target-start ok | (fail [error description]) ]]] """ log.info("Received new target: {}".format(target)) try: product = self._get_product(product_id) except ProductLookupError as error: raise Return(("fail", str(error))) try: target = Target(target) except Exception as error: raise Return(("fail", str(error))) yield product.target_start(target) raise Return(("ok", )) # DELETE this @request(Str()) @return_reply() @coroutine def request_target_stop(self, req, product_id): """ @brief Notify APSUSE that the telescope has stopped observing a target @param product_id This is a name for the data product, used to track which subarray is being deconfigured. For example "array_1_bc856M4k". @return katcp reply object [[[ !target-start ok | (fail [error description]) ]]] """ try: product = self._get_product(product_id) except ProductLookupError as error: raise Return(("fail", str(error))) yield product.target_stop() raise Return(("ok", )) @request(Str()) @return_reply() def request_capture_start(self, req, product_id): """ @brief Request that APSUSE start beams streaming @detail Upon this call the provided coherent and incoherent beam configurations will be evaluated to determine if they are physical and can be met with the existing hardware. If the configurations are acceptable then servers allocated to this instance will be triggered to begin production of beams. @param req A katcp request object @param product_id This is a name for the data product, used to track which subarray is being deconfigured. For example "array_1_bc856M4k". @return katcp reply object [[[ !start-beams ok | (fail [error description]) ]]] """ try: product = self._get_product(product_id) except ProductLookupError as error: return ("fail", str(error)) @coroutine def start(): try: yield product.capture_start() except Exception as error: req.reply("fail", str(error)) else: req.reply("ok", ) self.ioloop.add_callback(start) raise AsyncReply @request(Str()) @return_reply() def request_capture_stop(self, req, product_id): """ @brief Stop APSUSE streaming @param product_id This is a name for the data product, used to track which subarray is being deconfigured. For example "array_1_bc856M4k". """ try: product = self._get_product(product_id) except ProductLookupError as error: return ("fail", str(error)) @coroutine def stop(): yield product.capture_stop() req.reply("ok", ) self.ioloop.add_callback(stop) raise AsyncReply
class FbfMasterController(MasterController): """This is the main KATCP interface for the FBFUSE multi-beam beamformer on MeerKAT. This interface satisfies the following ICDs: CAM-FBFUSE: <link> TUSE-FBFUSE: <link> """ VERSION_INFO = ("mpikat-fbf-api", 0, 1) BUILD_INFO = ("mpikat-fbf-implementation", 0, 1, "rc1") DEVICE_STATUSES = ["ok", "degraded", "fail"] def __init__(self, ip, port, dummy=True, ip_range = FBF_IP_RANGE): """ @brief Construct new FbfMasterController instance @params ip The IP address on which the server should listen @params port The port that the server should bind to @params dummy Specifies if the instance is running in a dummy mode @note In dummy mode, the controller will act as a mock interface only, sending no requests to nodes. A valid node pool must still be provided to the instance, but this may point to non-existent nodes. """ self._ip_pool = IpRangeManager(ip_range_from_stream(ip_range)) super(FbfMasterController, self).__init__(ip, port, FbfWorkerPool()) self._dummy = dummy if self._dummy: for ii in range(64): self._server_pool.add("127.0.0.1", 50000+ii) def setup_sensors(self): """ @brief Set up monitoring sensors. """ super(FbfMasterController, self).setup_sensors() self._ip_pool_sensor = Sensor.string( "output-ip-range", description="The multicast address allocation for coherent beams", default=self._ip_pool.format_katcp(), initial_status=Sensor.NOMINAL) self.add_sensor(self._ip_pool_sensor) @request(Str(), Str(), Int(), Str(), Str()) @return_reply() def request_configure(self, req, product_id, antennas_csv, n_channels, streams_json, proxy_name): """ @brief Configure FBFUSE to receive and process data from a subarray @detail REQUEST ?configure product_id antennas_csv n_channels streams_json proxy_name Configure FBFUSE for the particular data products @param req A katcp request object @param product_id This is a name for the data product, which is a useful tag to include in the data, but should not be analysed further. For example "array_1_bc856M4k". @param antennas_csv A comma separated list of physical antenna names used in particular sub-array to which the data products belongs (e.g. m007,m008,m009). @param n_channels The integer number of frequency channels provided by the CBF. @param streams_json a JSON struct containing config keys and values describing the streams. For example: @code {'stream_type1': { 'stream_name1': 'stream_address1', 'stream_name2': 'stream_address2', ...}, 'stream_type2': { 'stream_name1': 'stream_address1', 'stream_name2': 'stream_address2', ...}, ...} @endcode The steam type keys indicate the source of the data and the type, e.g. cam.http. stream_address will be a URI. For SPEAD streams, the format will be spead://<ip>[+<count>]:<port>, representing SPEAD stream multicast groups. When a single logical stream requires too much bandwidth to accommodate as a single multicast group, the count parameter indicates the number of additional consecutively numbered multicast group ip addresses, and sharing the same UDP port number. stream_name is the name used to identify the stream in CAM. A Python example is shown below, for five streams: One CAM stream, with type cam.http. The camdata stream provides the connection string for katportalclient (for the subarray that this FBFUSE instance is being configured on). One F-engine stream, with type: cbf.antenna_channelised_voltage. One X-engine stream, with type: cbf.baseline_correlation_products. Two beam streams, with type: cbf.tied_array_channelised_voltage. The stream names ending in x are horizontally polarised, and those ending in y are vertically polarised. @code pprint(streams_dict) {'cam.http': {'camdata':'http://10.8.67.235/api/client/1'}, 'cbf.antenna_channelised_voltage': {'i0.antenna-channelised-voltage':'spead://239.2.1.150+15:7148'}, ...} @endcode If using katportalclient to get information from CAM, then reconnect and re-subscribe to all sensors of interest at this time. @param proxy_name The CAM name for the instance of the FBFUSE data proxy that is being configured. For example, "FBFUSE_3". This can be used to query sensors on the correct proxy, in the event that there are multiple instances in the same subarray. @note A configure call will result in the generation of a new subarray instance in FBFUSE that will be added to the clients list. @return katcp reply object [[[ !configure ok | (fail [error description]) ]]] """ msg = ("Configuring new FBFUSE product", "Product ID: {}".format(product_id), "Antennas: {}".format(antennas_csv), "Nchannels: {}".format(n_channels), "Streams: {}".format(streams_json), "Proxy name: {}".format(proxy_name)) log.info("\n".join(msg)) # Test if product_id already exists if product_id in self._products: return ("fail", "FBF already has a configured product with ID: {}".format(product_id)) # Determine number of nodes required based on number of antennas in subarray # Note this is a poor way of handling this that may be updated later. In theory # there is a throughput measure as a function of bandwidth, polarisations and number # of antennas that allows one to determine the number of nodes to run. Currently we # just assume one antennas worth of data per NIC on our servers, so two antennas per # node. try: antennas = parse_csv_antennas(antennas_csv) except AntennaValidationError as error: return ("fail", str(error)) valid_n_channels = [1024, 4096, 32768] if not n_channels in valid_n_channels: return ("fail", "The provided number of channels ({}) is not valid. Valid options are {}".format(n_channels, valid_n_channels)) streams = json.loads(streams_json) try: streams['cam.http']['camdata'] # Need to check for endswith('.antenna-channelised-voltage') as the i0 is not # guaranteed to stay the same. # i0 = instrument name # Need to keep this for future sensor lookups streams['cbf.antenna_channelised_voltage'] except KeyError as error: return ("fail", "JSON streams object does not contain required key: {}".format(str(error))) for key, value in streams['cbf.antenna_channelised_voltage'].items(): if key.endswith('.antenna-channelised-voltage'): instrument_name, _ = key.split('.') feng_stream_name = key feng_groups = value log.debug("Parsed instrument name from streams: {}".format(instrument_name)) break else: return ("fail", "Could not determine instrument name (e.g. 'i0') from streams") # TODO: change this request to @async_reply and make the whole thing a coroutine @coroutine def configure(): kpc = self._katportal_wrapper_type(streams['cam.http']['camdata']) # Get all antenna observer strings futures, observers = [],[] for antenna in antennas: log.debug("Fetching katpoint string for antenna {}".format(antenna)) futures.append(kpc.get_observer_string(antenna)) for ii,future in enumerate(futures): try: observer = yield future except Exception as error: log.error("Error on katportalclient call: {}".format(str(error))) req.reply("fail", "Error retrieving katpoint string for antenna {}".format(antennas[ii])) return else: log.debug("Fetched katpoint antenna: {}".format(observer)) observers.append(Antenna(observer)) # Get bandwidth, cfreq, sideband, f-eng mapping #TODO: Also get sync-epoch log.debug("Fetching F-engine and subarray configuration information") bandwidth_future = kpc.get_bandwidth(feng_stream_name) cfreq_future = kpc.get_cfreq(feng_stream_name) sideband_future = kpc.get_sideband(feng_stream_name) feng_antenna_map_future = kpc.get_antenna_feng_id_map(instrument_name, antennas) sync_epoch_future = kpc.get_sync_epoch() bandwidth = yield bandwidth_future cfreq = yield cfreq_future sideband = yield sideband_future feng_antenna_map = yield feng_antenna_map_future sync_epoch = yield sync_epoch_future feng_config = { 'bandwidth': bandwidth, 'centre-frequency': cfreq, 'sideband': sideband, 'feng-antenna-map': feng_antenna_map, 'sync-epoch': sync_epoch, 'nchans': n_channels } for key, value in feng_config.items(): log.debug("{}: {}".format(key, value)) product = FbfProductController(self, product_id, observers, n_channels, feng_groups, proxy_name, feng_config) self._products[product_id] = product self._update_products_sensor() req.reply("ok",) log.debug("Configured FBFUSE instance with ID: {}".format(product_id)) self.ioloop.add_callback(configure) raise AsyncReply @request(Str()) @return_reply() def request_deconfigure(self, req, product_id): """ @brief Deconfigure the FBFUSE instance. @note Deconfigure the FBFUSE instance. If FBFUSE uses katportalclient to get information from CAM, then it should disconnect at this time. @param req A katcp request object @param product_id This is a name for the data product, used to track which subarray is being deconfigured. For example "array_1_bc856M4k". @return katcp reply object [[[ !deconfigure ok | (fail [error description]) ]]] """ log.info("Deconfiguring FBFUSE instace with ID '{}'".format(product_id)) # Test if product exists try: product = self._get_product(product_id) except ProductLookupError as error: return ("fail", str(error)) try: product.deconfigure() except Exception as error: return ("fail", str(error)) del self._products[product_id] self._update_products_sensor() return ("ok",) @request(Str(), Str()) @return_reply() @coroutine def request_target_start(self, req, product_id, target): """ @brief Notify FBFUSE that a new target is being observed @param product_id This is a name for the data product, used to track which subarray is being deconfigured. For example "array_1_bc856M4k". @param target A KATPOINT target string @return katcp reply object [[[ !target-start ok | (fail [error description]) ]]] """ log.info("Received new target: {}".format(target)) try: product = self._get_product(product_id) except ProductLookupError as error: raise Return(("fail", str(error))) try: target = Target(target) except Exception as error: raise Return(("fail", str(error))) yield product.target_start(target) raise Return(("ok",)) # DELETE this @request(Str()) @return_reply() @coroutine def request_target_stop(self, req, product_id): """ @brief Notify FBFUSE that the telescope has stopped observing a target @param product_id This is a name for the data product, used to track which subarray is being deconfigured. For example "array_1_bc856M4k". @return katcp reply object [[[ !target-start ok | (fail [error description]) ]]] """ try: product = self._get_product(product_id) except ProductLookupError as error: raise Return(("fail", str(error))) yield product.target_stop() raise Return(("ok",)) @request(Str()) @return_reply() def request_capture_init(self, req, product_id): """NOOP""" return ("ok",) @request(Str()) @return_reply() def request_capture_done(self, req, product_id): """NOOP""" return ("ok",) @request(Str()) @return_reply() def request_capture_start(self, req, product_id): """ @brief Request that FBFUSE start beams streaming @detail Upon this call the provided coherent and incoherent beam configurations will be evaluated to determine if they are physical and can be met with the existing hardware. If the configurations are acceptable then servers allocated to this instance will be triggered to begin production of beams. @param req A katcp request object @param product_id This is a name for the data product, used to track which subarray is being deconfigured. For example "array_1_bc856M4k". @return katcp reply object [[[ !start-beams ok | (fail [error description]) ]]] """ try: product = self._get_product(product_id) except ProductLookupError as error: return ("fail", str(error)) @coroutine def start(): try: product.capture_start() except Exception as error: req.reply("fail", str(error)) else: req.reply("ok",) self.ioloop.add_callback(start) raise AsyncReply @request(Str(), Str()) @return_reply() def request_provision_beams(self, req, product_id, sb_id): """ @brief Request that FBFUSE asynchronously prepare to start beams streaming @detail Upon this call the provided coherent and incoherent beam configurations will be evaluated to determine if they are physical and can be met with the existing hardware. If the configurations are acceptable then servers allocated to this instance will be triggered to prepare for the production of beams. Unlike a call to ?capture-start, ?provision-beams will not trigger a connection to multicast groups and will not wait for completion before returning, instead it will start the process of beamformer resource alloction and compilation. To determine when the process is complete, the user must wait on the value of the product "state" sensor becoming "ready", e.g. @code client.sensor['{}-state'.format(proxy_name)].wait( lambda reading: reading.value == 'ready') @endcode @param req A katcp request object @param product_id This is a name for the data product, used to track which subarray is being deconfigured. For example "array_1_bc856M4k". @param sb_id Schedule block ID for the commencing schedule block @return katcp reply object [[[ !start-beams ok | (fail [error description]) ]]] """ # Note: the state of the product won't be updated until the start call hits the top of the # event loop. It may be preferable to keep a self.starting_future object and yield on it # in capture-start if it exists. The current implementation may or may not be a bug... try: product = self._get_product(product_id) except ProductLookupError as error: return ("fail", str(error)) # This check needs to happen here as this call # should return immediately if not product.idle: return ("fail", "Can only provision beams on an idle FBF product") self.ioloop.add_callback(lambda : product.prepare(sb_id)) return ("ok",) @request(Str()) @return_reply() def request_capture_stop(self, req, product_id): """ @brief Stop FBFUSE streaming @param product_id This is a name for the data product, used to track which subarray is being deconfigured. For example "array_1_bc856M4k". """ try: product = self._get_product(product_id) except ProductLookupError as error: return ("fail", str(error)) @coroutine def stop(): product.capture_stop() req.reply("ok",) self.ioloop.add_callback(stop) raise AsyncReply @request(Str(), Str(), Int()) @return_reply() def request_set_configuration_authority(self, req, product_id, hostname, port): """ @brief Set the configuration authority for an FBF product @detail The parameters passed here specify the address of a server that can be triggered to provide FBFUSE with configuration information at schedule block and target boundaries. The configuration authority must be a valid KATCP server. """ try: product = self._get_product(product_id) except ProductLookupError as error: return ("fail", str(error)) product.set_configuration_authority(hostname, port) return ("ok",) @request(Str()) @return_reply() def request_reset_beams(self, req, product_id): """ @brief Reset the positions of all allocated beams @note This call may only be made AFTER a successful call to start-beams. Before this point no beams are allocated to the instance. If all beams are currently allocated an exception will be raised. @param req A katcp request object @param product_id This is a name for the data product, used to track which subarray is being deconfigured. For example "array_1_bc856M4k". @return katcp reply object [[[ !reset-beams m ok | (fail [error description]) ]]] """ try: product = self._get_product(product_id) except ProductLookupError as error: return ("fail", str(error)) else: beam = product.reset_beams() return ("ok", ) @request(Str(), Str()) @return_reply(Str()) def request_add_beam(self, req, product_id, target): """ @brief Configure the parameters of one beam @note This call may only be made AFTER a successful call to start-beams. Before this point no beams are allocated to the instance. If all beams are currently allocated an exception will be raised. @param req A katcp request object @param product_id This is a name for the data product, used to track which subarray is being deconfigured. For example "array_1_bc856M4k". @param target A KATPOINT target string @return katcp reply object [[[ !add-beam ok | (fail [error description]) ]]] """ try: product = self._get_product(product_id) except ProductLookupError as error: return ("fail", str(error)) try: target = Target(target) except Exception as error: return ("fail", str(error)) beam = product.add_beam(target) return ("ok", beam.idx) @request(Str(), Str(), Int(), Float(), Float(), Float()) @return_reply(Str()) def request_add_tiling(self, req, product_id, target, nbeams, reference_frequency, overlap, epoch): """ @brief Configure the parameters of a static beam tiling @note This call may only be made AFTER a successful call to start-beams. Before this point no beams are allocated to the instance. If there are not enough free beams to satisfy the request an exception will be raised. @note Beam shapes calculated for tiling are always assumed to be 2D elliptical Gaussians. @param req A katcp request object @param product_id This is a name for the data product, used to track which subarray is being deconfigured. For example "array_1_bc856M4k". @param target A KATPOINT target string @param nbeams The number of beams in this tiling pattern. @param reference_frequency The reference frequency at which to calculate the synthesised beam shape, and thus the tiling pattern. Typically this would be chosen to be the centre frequency of the current observation. @param overlap The desired overlap point between beams in the pattern. The overlap defines at what power point neighbouring beams in the tiling pattern will meet. For example an overlap point of 0.1 corresponds to beams overlapping only at their 10%-power points. Similarly a overlap of 0.5 corresponds to beams overlapping at their half-power points. [Note: This is currently a tricky parameter to use when values are close to zero. In future this may be define in sigma units or in multiples of the FWHM of the beam.] @param epoch The desired epoch for the tiling pattern as a unix time. A typical usage would be to set the epoch to half way into the coming observation in order to minimise the effect of parallactic angle and array projection changes altering the shape and position of the beams and thus changing the efficiency of the tiling pattern. @return katcp reply object [[[ !add-tiling ok | (fail [error description]) ]]] """ try: product = self._get_product(product_id) except ProductLookupError as error: return ("fail", str(error)) try: target = Target(target) except Exception as error: return ("fail", str(error)) tiling = product.add_tiling(target, nbeams, reference_frequency, overlap, epoch) return ("ok", tiling.idxs()) @request() @return_reply(Int()) def request_product_list(self, req): """ @brief List all currently registered products and their states @param req A katcp request object @note The details of each product are provided via an #inform as a JSON string containing information on the product state. @return katcp reply object [[[ !product-list ok | (fail [error description]) <number of configured products> ]]], """ for product_id,product in self._products.items(): info = {} info[product_id] = product.info() as_json = json.dumps(info) req.inform(as_json) return ("ok",len(self._products)) @request(Str(), Str()) @return_reply() def request_set_default_target_configuration(self, req, product_id, target): """ @brief Set the configuration of FBFUSE from the FBFUSE configuration server @param product_id This is a name for the data product, used to track which subarray is being deconfigured. For example "array_1_bc856M4k". @param target A KATPOINT target string """ try: product = self._get_product(product_id) except ProductLookupError as error: return ("fail", str(error)) try: target = Target(target) except Exception as error: return ("fail", str(error)) if not product.capturing: return ("fail","Product must be capturing before a target confiugration can be set.") product.reset_beams() # TBD: Here we connect to some database and request the default configurations # For example this may return secondary target in the FoV # # As a default the current system will put one beam directly on target and # the rest of the beams in a static tiling pattern around this target now = time.time() nbeams = product._beam_manager.nbeams product.add_tiling(target, nbeams-1, 1.4e9, 0.5, now) product.add_beam(target) return ("ok",) @request(Str(), Str()) @return_reply() def request_set_default_sb_configuration(self, req, product_id, sb_id): """ @brief Set the configuration of FBFUSE from the FBFUSE configuration server @param product_id This is a name for the data product, used to track which subarray is being deconfigured. For example "array_1_bc856M4k". @param sb_id The schedule block ID. Decisions of the configuarion of FBFUSE will be made dependent on the configuration of the current subarray, the primary and secondary science projects active and the targets expected to be visted during the execution of the schedule block. """ try: product = self._get_product(product_id) except ProductLookupError as error: return ("fail", str(error)) if product.capturing: return ("fail", "Cannot reconfigure a currently capturing instance.") product.configure_coherent_beams(400, product._katpoint_antennas, 1, 16) product.configure_incoherent_beam(product._katpoint_antennas, 1, 16) now = time.time() nbeams = product._beam_manager.nbeams product.add_tiling(target, nbeams-1, 1.4e9, 0.5, now) product.add_beam(target) return ("ok",)
class PafMasterController(MasterController): """This is the main KATCP interface for the PAF pulsar searching system on MeerKAT. This controller only holds responsibility for capture of data from the CBF network and writing of that data to disk. This interface satisfies the following ICDs: CAM-PAF: <link> """ VERSION_INFO = ("mpikat-paf-api", 0, 1) BUILD_INFO = ("mpikat-paf-implementation", 0, 1, "rc1") DEVICE_STATUSES = ["ok", "degraded", "fail"] CONTROL_MODES = ["KATCP", "SCPI"] KATCP, SCPI = CONTROL_MODES def __init__(self, ip, port, scpi_ip, scpi_port): """ @brief Construct new PafMasterController instance @params ip The IP address on which the server should listen @params port The port that the server should bind to """ super(PafMasterController, self).__init__(ip, port, PafWorkerPool()) self._control_mode = self.KATCP self._scpi_ip = scpi_ip self._scpi_port = scpi_port self._status_server = JsonStatusServer(ip, 0) def start(self): super(PafMasterController, self).start() self._status_server.start() address = self._status_server.bind_address log.info("Status server started at {}".format(address)) self._status_server_sensor.set_value(address) self._scpi_interface = PafScpiInterface( self, self._scpi_ip, self._scpi_port, self.ioloop) def stop(self): self._scpi_interface.stop() self._scpi_interface = None self._status_server.stop() super(PafMasterController, self).stop() def setup_sensors(self): super(PafMasterController, self).setup_sensors() self._paf_config_sensor = Sensor.string( "current-config", description="The currently set configuration for the PAF backend", default="", initial_status=Sensor.UNKNOWN) self.add_sensor(self._paf_config_sensor) self._status_server_sensor = Sensor.address( "status-server-address", description="The address of the status server", default="", initial_status=Sensor.UNKNOWN) self.add_sensor(self._status_server_sensor) @property def katcp_control_mode(self): return self._control_mode == self.KATCP @property def scpi_control_mode(self): return self._control_mode == self.SCPI @request(Str()) @return_reply() def request_set_control_mode(self, req, mode): """ @brief Set the external control mode for the master controller @param mode The external control mode to be used by the server (options: KATCP, SCPI) @detail The PafMasterController supports two methods of external control: KATCP and SCPI. The server will always respond to a subset of KATCP commands, however when set to SCPI mode the following commands are disabled to the KATCP interface: - configure - capture_start - capture_stop - deconfigure In SCPI control mode the PafScpiInterface is activated and the server will respond to SCPI requests. @return katcp reply object [[[ !configure ok | (fail [error description]) ]]] """ try: self.set_control_mode(mode) except Exception as error: return ("fail", str(error)) else: return ("ok",) def set_control_mode(self, mode): """ @brief Set the external control mode for the master controller @param mode The external control mode to be used by the server (options: KATCP, SCPI) """ mode = mode.upper() if mode not in self.CONTROL_MODES: raise UnknownControlMode("Unknown mode '{}', valid modes are '{}' ".format( mode, ", ".join(self.CONTROL_MODES))) else: self._control_mode = mode if self._control_mode == self.SCPI: self._scpi_interface.start() else: self._scpi_interface.stop() self._control_mode_sensor.set_value(self._control_mode) @request(Str()) @return_reply() def request_configure(self, req, config_json): """ @brief Configure PAF to receive and process data @param req A katcp request object @param config_json A JSON object containing configuration information. @note The JSON configuration object should be of the form: @code { "mode": "Search1Beam", "nbands": 48, "frequency": 1340.5, "nbeams": 18, "band_offset": 0, "write_filterbank": 0 } @endcode @return katcp reply object [[[ !configure ok | (fail [error description]) ]]] """ if not self.katcp_control_mode: return ("fail", "Master controller is in control mode: {}".format( self._control_mode)) @coroutine def configure_wrapper(): try: yield self.configure(config_json) except Exception as error: log.exception(str(error)) req.reply("fail", str(error)) else: req.reply("ok") self.ioloop.add_callback(configure_wrapper) raise AsyncReply @coroutine def configure(self, config_json): log.info("Configuring PAF backend") log.info("Configuration string: '{}'".format(config_json)) if self._products: log.error("PAF already has a configured data product") raise PafConfigurationError( "PAF already has a configured data product") config_dict = json.loads(config_json) for key in PAF_REQUIRED_KEYS: if key not in config_dict: message = "No value set for required configuration parameter '{}'".format( key) log.error(message) raise PafConfigurationError(message) self._paf_config_sensor.set_value(config_json) log.debug("Building product controller for PAF processing") self._products[PAF_PRODUCT_ID] = PafProductController( self, PAF_PRODUCT_ID) self._update_products_sensor() log.debug("Configuring product controller") try: yield self._products[PAF_PRODUCT_ID].configure(config_json) except Exception as error: log.error( "Failed to configure product with error: {}".format(str(error))) raise PafConfigurationError(str(error)) else: log.debug( "Configured product controller with ID: {}".format(PAF_PRODUCT_ID)) log.info("PAF backend configured") @request() @return_reply() def request_deconfigure(self, req): """ @brief Deconfigure the PAF instance. @param req A katcp request object @return katcp reply object [[[ !deconfigure ok | (fail [error description]) ]]] """ if not self.katcp_control_mode: return ("fail", "Master controller is in control mode: {}".format( self._control_mode)) @coroutine def deconfigure_wrapper(): try: yield self.deconfigure() except Exception as error: req.reply("fail", str(error)) else: req.reply("ok") self.ioloop.add_callback(deconfigure_wrapper) raise AsyncReply @coroutine def deconfigure(self): log.info("Deconfiguring PAF backend") product = self._get_product(PAF_PRODUCT_ID) log.debug("Deconfiguring product controller with ID: {}".format( PAF_PRODUCT_ID)) yield product.deconfigure() del self._products[PAF_PRODUCT_ID] self._update_products_sensor() log.info("PAF backend deconfigured") @request() @return_reply() def request_capture_start(self, req): """ @brief Start the PAF backend processing @note This is the KATCP wrapper for the capture_start command @return katcp reply object [[[ !capture_start ok | (fail [error description]) ]]] """ if not self.katcp_control_mode: return ("fail", "Master controller is in control mode: {}".format( self._control_mode)) @coroutine def start_wrapper(): try: yield self.capture_start() except Exception as error: log.exception(str(error)) req.reply("fail", str(error)) else: req.reply("ok") self.ioloop.add_callback(start_wrapper) raise AsyncReply @coroutine def capture_start(self): log.info("Starting PAF backend") status_json = self._status_server.as_json() log.info("Telescope status at capture start:\n{}".format( json.loads(status_json))) product = self._get_product(PAF_PRODUCT_ID) log.debug("Starting product controller with ID: {}".format( PAF_PRODUCT_ID)) yield product.capture_start(status_json) log.info("PAF backend started") @request() @return_reply() def request_capture_stop(self, req): """ @brief Stop PAF streaming @param PAF_PRODUCT_ID This is a name for the data product, used to track which subarray is being deconfigured. For example "array_1_bc856M4k". """ if not self.katcp_control_mode: return ("fail", "Master controller is in control mode: {}".format( self._control_mode)) @coroutine def stop_wrapper(): try: yield self.capture_stop() except Exception as error: req.reply("fail", str(error)) else: req.reply("ok") self.ioloop.add_callback(stop_wrapper) raise AsyncReply @coroutine def capture_stop(self): log.info("Stopping PAF backend") product = self._get_product(PAF_PRODUCT_ID) log.debug("Stopping product controller with ID: {}".format( PAF_PRODUCT_ID)) yield product.capture_stop() log.info("PAF backend stopped")
self._clients[name].stop() self._clients[name].join() del self._clients[name] @request(Str(), Str(), Int()) @return_reply(Str()) def request_client_add(self, req, name, ip, port): """Add a new client.""" try: self._add_client(name, ip, port) except KeyError, e: return ("fail", str(e)) return ("ok", "added client") @request() @return_reply(Str()) def request_client_list(self, req): """List all available clients""" msg = [""] for ii, (name, client) in enumerate(self._clients.items()): msg.append("{client.name} {client.address}".format(client=client)) req.inform("\n\_\_\_\_".join(msg)) return ("ok", "{count} clients found".format(count=len(self._clients))) @request() @return_reply(Discrete(NodeServer.DEVICE_STATUSES)) def request_device_status(self, req): """Return status of the instrument. Notes: Status is based on aggregate information from all subordinate client.
def test_unpack_types_many_with_multiple(self): expected = ['one', 2, 3] self.check_unpacking([Str(), Int(multiple=True)], [b'one', b'2', b'3'], expected)
class BaseFbfConfigurationAuthority(AsyncDeviceServer): """This is an example/template for how users may develop an fbf configuration authority server """ VERSION_INFO = ("mpikat-fbf-ca-api", 0, 1) BUILD_INFO = ("mpikat-fbf-ca-implementation", 0, 1, "rc1") DEVICE_STATUSES = ["ok", "degraded", "fail"] def __init__(self, ip, port): super(BaseFbfConfigurationAuthority, self).__init__(ip, port) self._configuration_sensors = {} self._configuration_callbacks = {} def start(self): """ @brief Start the BaseFbfConfigurationAuthority server """ super(BaseFbfConfigurationAuthority, self).start() def setup_sensors(self): """ @brief Set up monitoring sensors. @note The following sensors are made available on top of default sensors implemented in AsynDeviceServer and its base classes. device-status: Reports the health status of the CA server and associated devices: Among other things report HW failure, SW failure and observation failure. """ self._device_status = Sensor.discrete( "device-status", description="Health status of this device", params=self.DEVICE_STATUSES, default="ok", initial_status=Sensor.UNKNOWN) self.add_sensor(self._device_status) @request(Str(), Str()) @return_reply(Str()) @tornado.gen.coroutine def request_get_schedule_block_configuration(self, req, product_id, sb_id): """ @brief Get an FBFUSE configuration for the current instance @param product_id The product identifier @param sb_id The schedule block identifier @note The product_id argument may be superfluous, although it allows the CA server to look up parameters on the unconfigured product from the FBFUSE sensor set through katportalclient """ log.info( "Received SB configuration request for '{}' and schedule block: {}" .format(product_id, sb_id)) if product_id in self._configuration_sensors: self.remove_sensor(self._configuration_sensors[product_id]) del self._configuration_sensors[product_id] self.mass_inform(Message.inform('interface-changed')) config = yield self.get_sb_config(product_id, sb_id) raise Return(("ok", json.dumps(config))) @tornado.gen.coroutine def get_sb_config(self, product_id, sb_id): raise NotImplemented @request(Str(), Str()) @return_reply() @tornado.gen.coroutine def request_target_configuration_start(self, req, product_id, target_string): """ @brief Set up a beam configuration sensor for the FBFUSE instance @param product_id The product identifier @param target_string A KATPOINT target string (boresight pointing position) """ log.info( "Received target configuration request for '{}' with target: {}". format(product_id, target_string)) if not product_id in self._configuration_sensors: log.debug( "Creating configuration sensor for '{}'".format(product_id)) self._configuration_sensors[product_id] = Sensor.string( "{}-beam-position-configuration".format(product_id), description="Configuration description for FBF beam placement", default="", initial_status=Sensor.UNKNOWN) self.add_sensor(self._configuration_sensors[product_id]) self.mass_inform(Message.inform('interface-changed')) initial_config = yield self.get_target_config(product_id, target_string) self.update_target_config(product_id, initial_config) raise Return(("ok", )) @tornado.gen.coroutine def get_target_config(self, product_id, target): # This should call update target config raise NotImplemented def update_target_config(self, product_id, config): log.debug("Updating target config on '{}' with config: {}".format( product_id, config)) self._configuration_sensors[product_id].set_value(json.dumps(config))
class BLBackendInterface(AsyncDeviceServer): """Breakthrough Listen's KATCP Server Backend Interface This server responds to requests sent from CAM, most notably: @ configue @ capture-init @ capture-start @ capture-stop @ capture-done @ deconfigure But because it inherits from AsyncDeviceServer, also responds to: * halt * help * log-level * restart [#restartf1]_ * client-list * sensor-list * sensor-sampling * sensor-value * watchdog * version-list (only standard in KATCP v5 or later) * request-timeout-hint (pre-standard only if protocol flags indicates timeout hints, supported for KATCP v5.1 or later) * sensor-sampling-clear (non-standard) """ VERSION = "2020-06-19" DEVICE_STATUSES = ["ok", "fail", "degraded"] def __init__(self, server_host, server_port): self.port = server_port self.redis_server = redis.StrictRedis() super(BLBackendInterface, self).__init__(server_host, server_port) def start(self): """Start the server Based on the passed configuration object this is where the clients for subordinate nodes will be set up. """ super(BLBackendInterface, self).start() if (sys.stdout.isatty()): print(R""" ,'''''-._ ; ,. <> `-._ ; \' _,--'" ; ( ; , ` \ ;, , \ ; | | MeerKAT BL Backend Interface: ; |, | |\ KATCP Server ; | | | \ Version: {} |.-\ ,\ |\ : Port: {} |.| `. `-. | || :.| `-. \ ';; .- , \;;| ; , | ,\ https://github.com/danielczech/meerkat-backend-interface ; , ; \ https://github.com/ejmichaud/meerkat-backend-interface ; , /`. , ) __,;, ,' \ ,| _,--''__,| / \ : ,'_,-'' | ,/ | : / / | ; ; | | | __,-| |--..__,--| |---.--....___ ___,-| |----'' / | `._`-. `---- \ \ `''' ''' -- `.`. --' `.`-._ _, ,- __,- `-.`. --' `; """.format(self.VERSION, self.port)) @request(Str(), Str(), Int(), Str(), Str()) @return_reply() def request_configure(self, req, product_id, antennas_csv, n_channels, streams_json, proxy_name): """Receive metadata for upcoming observation. This command is used to configure a BLUSE instance when a new subarray is activated. Args: product_id (str): This is the name of the current subarray, which is used when requesting sensor data specific to components which belong to the current subarray. Eg "array_1". This name is unique across all current subarrays (no two concurrently active subarrays will have the same name). However, it is not a globally unique identifier for all time. An identical name may be provided for later activations of other subarrays. antennas_csv (str): A comma separated list of physical antenna names used in the current subarray. n_channels (int): The integer number of frequency channels provided by the CBF. streams_json (str) is a JSON struct containing config keys and values describing the streams. For example: {'stream_type1': { 'stream_name1': 'stream_address1', 'stream_name2': 'stream_address2', ...}, 'stream_type2': { 'stream_name1': 'stream_address1', 'stream_name2': 'stream_address2', ...}, ...} The steam type keys indicate the source of the data and the type, e.g. a cam.http. stream_address will be a URL. For SPEAD streams, the format will be spead://<ip>[+<count>]:<port>, representing SPEAD stream multicast groups. The count parameter indicates the number of additional consecutively numbered multicast group IP addresses (sharing the same UDP port number). stream_name is the name used to identify the stream in CAM. A Python example is shown below, for five streams: One CAM stream, with type cam.http. The CAM stream provides the connection string for katportalclient (for the specific subarray that this BLUSE instance is being configured for). One F-engine stream, with type: cbf.antenna_channelised_voltage. One X-engine stream, with type: cbf.baseline_correlation_products. Two beam streams, with type: cbf.tied_array_channelised_voltage. The stream names ending in x are horizontally polarised, and those ending in y are vertically polarised. proxy_name (str): The CAM name for the instance of the BLUSE data proxy that is being configured. For example, "bluse_3". There will only be a single BLUSE instance of the proxy per subarray. Returns: None (responds with "ok" or "fail" and logs either info or an error). Writes: - [product_id]:timestamp" -> "1534657577373.23423" (Redis string) - [product_id]:antennas" -> [1,2,3,4] (Redis list) - [product_id]:n_channels" -> "4096" (Redis string) - [product_id]:proxy_name "-> "bluse_N" (Redis string) - [product_id]:streams" -> {....} (Redis string) - current:obs:id -> [product_id] - [product_id]:cbf_prefix -> (Redis string) Publishes: redis-channel: 'alerts' <-- "configure" Examples: > ?configure array_1_bc856M4k a1,a2,a3,a4 128000 { "cam.http":{"camdata":"http://monctl.devnmk.camlab.kat.ac.za/api/client/2"}, "stream_type2":{"stream_name1":"stream_address1","stream_name2":"stream_address2"}} BLUSE_3 """ try: antennas_list = antennas_csv.split(",") json_dict = unpack_dict(streams_json) cam_url = json_dict['cam.http']['camdata'] except Exception as e: log.error(e) return ("fail", e) # Ascertain the CBF sensor prefix (and F-engine output type). # Default to 'wide'; if 'wide' is not available, try 'narrow1'. # If neither are available, take the first available F-engine # output type. try: stream_type = 'cbf.antenna_channelised_voltage' if ('wide.antenna-channelised-voltage' in json_dict[stream_type]): cbf_prefix = 'wide' log.info('CBF prefix extracted: wide') elif ('narrow1.antenna-channelised-voltage' in json_dict[stream_type]): cbf_prefix = 'narrow1' log.info('CBF prefix extracted: narrow1') else: cbf_prefix = next(iter(json_dict[stream_type])).split('.')[0] log.info('CBF prefix extracted: {}'.format(cb_prefix)) except Exception as e: cbf_prefix = 'wide' log.error('Could not extract CBF prefix; defaulting to \'wide\'') statuses = [] statuses.append( write_pair_redis(self.redis_server, "{}:timestamp".format(product_id), time.time())) statuses.append( write_list_redis(self.redis_server, "{}:antennas".format(product_id), antennas_list)) statuses.append( write_pair_redis(self.redis_server, "{}:n_channels".format(product_id), n_channels)) statuses.append( write_pair_redis(self.redis_server, "{}:proxy_name".format(product_id), proxy_name)) statuses.append( write_pair_redis(self.redis_server, "{}:streams".format(product_id), json.dumps(json_dict))) statuses.append( write_pair_redis(self.redis_server, "{}:cam:url".format(product_id), cam_url)) statuses.append( write_pair_redis(self.redis_server, "current:obs:id", product_id)) statuses.append( write_pair_redis(self.redis_server, "{}:cbf_prefix".format(product_id), cbf_prefix)) msg = "configure:{}".format(product_id) statuses.append( publish_to_redis(self.redis_server, REDIS_CHANNELS.alerts, msg)) if all(statuses): return ("ok", ) else: return ("fail", "Failed to publish to our local redis server") @request(Str()) @return_reply() def request_capture_init(self, req, product_id): """Signals that an observation will start soon. Publishes a message to the 'alerts' channel of the form: capture-init:product_id The product_id matches that which was sent in the ?configure request. """ msg = "capture-init:{}".format(product_id) success = publish_to_redis(self.redis_server, REDIS_CHANNELS.alerts, msg) if success: return ("ok", ) else: return ("fail", "Failed to publish to our local redis server") @request(Str()) @return_reply() def request_capture_start(self, req, product_id): """Signals that an observation is starting now. Publishes a message to the 'alerts' channel of the form: capture-start:product_id The product_id matches that which was sent in the ?configure request. """ msg = "capture-start:{}".format(product_id) success = publish_to_redis(self.redis_server, REDIS_CHANNELS.alerts, msg) if success: return ("ok", ) else: return ("fail", "Failed to publish to our local redis server") @request(Str()) @return_reply() def request_capture_stop(self, req, product_id): """Signals that an observation is has stopped. Publishes a message to the 'alerts' channel of the form: capture-stop:product_id The product_id matches that which was sent in the ?configure request. """ msg = "capture-stop:{}".format(product_id) success = publish_to_redis(self.redis_server, REDIS_CHANNELS.alerts, msg) if success: return ("ok", ) else: return ("fail", "Failed to publish to our local redis server") @request(Str()) @return_reply() def request_capture_done(self, req, product_id): """Signals that an observation has finished. Publishes a message to the 'alerts' channel of the form: capture-done:product_id The product_id matches that which was sent in the ?configure request. """ msg = "capture-done:{}".format(product_id) success = publish_to_redis(self.redis_server, REDIS_CHANNELS.alerts, msg) if success: return ("ok", ) else: return ("fail", "Failed to publish to our local redis server") @request(Str()) @return_reply() def request_deconfigure(self, req, product_id): """Signals that the current subarray has been deconfigured (dismantled) and its associated components released for use in another subarray. This is the signal to deconfigure the BLUSE instance associated with the current subarray (and which was created by the call to ?configure with the corresponding product_id). Note: CAM is expected to have sent a ?capture-done request before deconfiguring. If BLUSE uses an instance of katportalclient to get information from CAM for the current subarray, it should disconnect. Publishes a message to the 'alerts' channel of the form: deconfigure:product_id The product_id matches that which was sent in the ?configure request. Other backend processes (such as beamformer) should be notified that their data streams are ending. """ msg = "deconfigure:{}".format(product_id) success = publish_to_redis(self.redis_server, REDIS_CHANNELS.alerts, msg) if success: return ("ok", ) else: return ("fail", "Failed to publish to our local redis server") def setup_sensors(self): # TODO: Need to re-look at this function. """ @brief Set up monitoring sensors. @note The following sensors are made available on top of default sensors implemented in AsynDeviceServer and its base classes. device-status: Reports the health status of the proxy and associated devices: Among other things report HW failure, SW failure and observation failure. """ self._device_status = Sensor.discrete( "device-status", description="Health status of BLUSE", params=self.DEVICE_STATUSES, default="ok", initial_status=Sensor.NOMINAL) self.add_sensor(self._device_status) self._local_time_synced = Sensor.boolean( "local-time-synced", description="Indicates BLUSE is NTP syncronised.", default=True, # TODO: implement actual NTP synchronization request initial_status=Sensor.NOMINAL) self.add_sensor(self._local_time_synced) self._version = Sensor.string( "version", description="Reports the current BLUSE version", default=str(self.VERSION_INFO[1:]).strip('()').replace( ' ', '').replace(",", '.'), # e.g. '1.0' initial_status=Sensor.NOMINAL) self.add_sensor(self._version) def request_halt(self, req, msg): """Halts the server, writes to the log, and exits the program. Returns: success : {'ok', 'fail'} (whether scheduling the halt succeeded). Examples: ?halt !halt ok TODO: - Call halt method on superclass to avoid copy paste Doing this caused an issue: File "/Users/Eric/Berkeley/seti/packages/meerkat/lib/python2.7/site-packages/katcp/server.py", line 1102, in handle_request assert (reply.mtype == Message.REPLY) AttributeError: 'NoneType' object has no attribute 'mtype' """ f = Future() @gen.coroutine def _halt(): req.reply("ok") yield gen.moment self.stop(timeout=None) raise AsyncReply self.ioloop.add_callback(lambda: chain_future(_halt(), f)) log.critical("HALTING SERVER!!!") # notify_slack("KATCP server at MeerKAT has halted. Might want to check that!") sys.exit(0) @request() @return_reply(Str()) def request_find_alien(self, req): """Finds an alien. """ return ("ok", R""" . . . . . . . . . + . . . : . .. :. .___---------___. . . . . :.:. _".^ .^ ^. '.. :"-_. . . : . . .:../: . .^ :.:\. . . :: +. :.:/: . . . . . .:\ . : . . _ :::/: . ^ . . .:\ .. . . . - : :.:./. . .:\ . . . :..|: . . ^. .:| . . : : ..|| . . . !:| . . . . ::. ::\( . :)/ . . : . : .:.|. ###### .#######::| :.. . :- : .: ::|.####### ..########:| . . . .. . .. :\ ######## :######## :/ . .+ :: : -.:\ ######## . ########.:/ . .+ . . . . :.:\. ####### #######..:/ :: . . . . ::.:..:.\ . . ..:/ . . . .. : -::::.\. | | . .:/ . : . . .-:.":.::.\ ..:/ . -. . . . .: .:::.:.\. .:/ . . . : : ....::_:..:\ ___. :/ . . . .:. .. . .: :.:.:\ :/ + . . : . ::. :.:. .:.|\ .:/| . + . . ...:: ..| --.:| . . . . . . . ... :..:.."( ..)" . . . : . .: ::/ . .::\ """)