class EddFitsInterfaceClient(object):
    """
    Wrapper class for a KATCP client to a EddFitsInterfaceServer
    """

    def __init__(self, name, address):
        """
        @brief      Construct new instance

        @param name     A name used to namespace this client's logger
        @param address  The (host, port) address of the
                        EddFitsInterfaceServer to connect to
        """
        self.log = logging.getLogger("mpikat.edd_fi.{}".format(name))
        self._fits_interface_client = KATCPClientResource(
            dict(name="fits-interface-client",
                 address=address,
                 controlled=True))
        # Client connects in the background; requests should sync first
        # (see presync in _request_helper / the explicit sync in configure).
        self._fits_interface_client.start()

    @coroutine
    def _request_helper(self, name, *args, **kwargs):
        """
        @brief      Make a request to the FITS interface server and raise
                    EddFitsInterfaceClientError on an error reply.

        @param name    The name of the KATCP request to issue
        @param args    Positional arguments forwarded to the request

        @note  Pass presync=True to synchronise with the server (2 second
               timeout) before issuing the request. All other keyword
               arguments (e.g. timeout) are forwarded to the request.
        """
        if kwargs.pop("presync", None):
            yield self._fits_interface_client.until_synced(2)
        # BUGFIX: remaining keyword arguments (such as timeout) were
        # previously dropped; forward them to the underlying request.
        response = yield self._fits_interface_client.req[name](
            *args, **kwargs)
        if not response.reply.reply_ok():
            self.log.error("Error on {} request: {}".format(
                name, response.reply.arguments[1]))
            raise EddFitsInterfaceClientError(response.reply.arguments[1])

    @coroutine
    def configure(self, config):
        """
        @brief      Configure the attached FITS writer interface

        @param config  A dictionary containing configuration information.
                       Must provide the keys: nbeams, nchans,
                       integration_time and blank_phases.
        """
        yield self._fits_interface_client.until_synced(2)
        nbeams = config["nbeams"]
        nchans = config["nchans"]
        integration_time = config["integration_time"]
        blank_phases = config["blank_phases"]
        yield self._request_helper(
            "configure", nbeams, nchans, integration_time, blank_phases)

    @coroutine
    def capture_start(self):
        """
        @brief      Start the FITS interface capturing data
        """
        yield self._request_helper("start")

    @coroutine
    def capture_stop(self):
        """
        @brief      Stop the FITS interface from capturing data
        """
        yield self._request_helper("stop")
class KatcpSidecar(object):
    """
    Attaches to a KATCP server, mirrors its sensor set (including
    interface changes) and forwards every sensor update to a set of
    registered callbacks.
    """

    def __init__(self, host, port):
        """
        Constructs a new instance.

        :param host: The address of the server to sidecar
        :param port: The server port
        """
        log.debug("Constructing sidecar for {}:{}".format(host, port))
        self.rc = KATCPClientResource(
            dict(name="sidecar-client",
                 address=(host, port),
                 controlled=True))
        self._update_callbacks = set()
        self._previous_sensors = set()

    @coroutine
    def start(self):
        """
        @brief     Start the sidecar

        @note  Hooks the server's interface-changed inform so that new or
               removed sensors are picked up while the sidecar is running.
        """
        @coroutine
        def _start():
            log.debug("Waiting on synchronisation with server")
            yield self.rc.until_synced()
            log.debug("Client synced")
            log.debug("Requesting version info")
            response = yield self.rc.req.version_list()
            log.info("response: {}".format(response))
            # Perform an initial sensor synchronisation
            self.ioloop.add_callback(self.on_interface_changed)
        self.rc.start()
        self.ic = self.rc._inspecting_client
        self.ioloop = self.rc.ioloop
        self.ic.katcp_client.hook_inform(
            "interface-changed",
            lambda message: self.ioloop.add_callback(
                self.on_interface_changed))
        self.ioloop.add_callback(_start)

    def stop(self):
        """
        @brief     Stop the sidecar
        """
        self.rc.stop()

    @coroutine
    def on_interface_changed(self):
        """
        @brief    Synchronise with the sidecar'd servers new sensors

        @note  Newly appeared sensors get an "auto" sampling strategy and
               a listener; sensors are diffed against the previous set so
               existing sensors are not re-registered.
        """
        log.debug("Waiting on synchronisation with server")
        yield self.rc.until_synced()
        log.debug("Client synced")
        current_sensors = set(self.rc.sensor.keys())
        log.debug("Current sensor set: {}".format(current_sensors))
        removed = self._previous_sensors.difference(current_sensors)
        log.debug("Sensors removed since last update: {}".format(removed))
        added = current_sensors.difference(self._previous_sensors)
        log.debug("Sensors added since last update: {}".format(added))
        for name in list(added):
            log.debug(
                "Setting sampling strategy and callbacks on sensor '{}'".format(
                    name))
            self.rc.set_sampling_strategy(name, "auto")
            self.rc.set_sensor_listener(name, self.on_sensor_update)
        self._previous_sensors = current_sensors

    @coroutine
    def on_sensor_update(self, sensor, reading):
        """
        @brief      Callback to be executed on a sensor being updated

        @param      sensor   A KATCP Sensor Object
        @param      reading  The sensor reading
        """
        # BUGFIX: corrected 'Recieved' typo in the log message.
        log.debug("Received sensor update for sensor '{}': {}".format(
            sensor.name, repr(reading)))
        for callback in list(self._update_callbacks):
            try:
                callback(sensor, reading)
            except Exception as error:
                # A failing consumer must not break the other callbacks.
                log.exception(
                    "Failed to call update callback {} with error: {}".format(
                        callback, str(error)))

    def add_sensor_update_callback(self, callback):
        """
        @brief    Add a sensor update callback.

        @param      callback: The callback

        @note     The callback must have a call signature of
                  func(sensor, reading)
        """
        self._update_callbacks.add(callback)

    def remove_sensor_update_callback(self, callback):
        """
        @brief    Remove a sensor update callback.

        @param      callback: The callback
        """
        self._update_callbacks.remove(callback)
def setup_32beam_4ant(worker_addr, dc_addr): # Hardcoded numbers nbeams = 32 tot_nchans = 4096 feng_groups = "spead://239.8.0.0+3:7148" chan0_idx = 0 chan0_freq = 1240e6 chan_bw = 856e6 / tot_nchans dc_client = KATCPClientResource( dict(name="delay-configuration-client", address=dc_addr, controlled=True)) yield dc_client.start() print "Syncing delay client" yield dc_client.until_synced(timeout=4.0) print "Synced" antennas_json = yield dc_client.sensor['antennas'].get_value() print "done" antennas = json.loads(antennas_json) print antennas worker_client = KATCPClientResource( dict(name="worker-server-client", address=worker_addr, controlled=True)) yield worker_client.start() print "Syncing worker server" yield worker_client.until_synced(timeout=4.0) print "done" coherent_beams_csv = ",".join( ["cfbf{:05d}".format(ii) for ii in range(nbeams)]) feng_antenna_map = {antenna: ii for ii, antenna in enumerate(antennas)} coherent_beam_antennas = antennas incoherent_beam_antennas = antennas nantennas = len(antennas) nchans_per_group = tot_nchans / nantennas / 4 mcast_to_beam_map = { "spead://239.11.1.0:7148": coherent_beams_csv, "spead://239.11.1.150:7148": "ifbf00001" } feng_config = { "bandwidth": 856e6, "centre-frequency": 1200e6, "sideband": "upper", "feng-antenna-map": feng_antenna_map, "sync-epoch": 12353524243.0, "nchans": 4096 } coherent_beam_config = { "tscrunch": 16, "fscrunch": 1, "antennas": ",".join(coherent_beam_antennas) } incoherent_beam_config = { "tscrunch": 16, "fscrunch": 1, "antennas": ",".join(incoherent_beam_antennas) } print "Making prepare request" response = yield worker_client.req.prepare( feng_groups, nchans_per_group, chan0_idx, chan0_freq, chan_bw, nbeams, json.dumps(mcast_to_beam_map), json.dumps(feng_config), json.dumps(coherent_beam_config), json.dumps(incoherent_beam_config), *dc_addr, timeout=300.0) if not response.reply.reply_ok(): raise Exception("Error on prepare: {}".format( response.reply.arguments)) else: print "prepare done"
class EddRoach2ProductController(ProductController):
    """
    Wrapper class for an EDD ROACH2 product.
    """

    def __init__(self, parent, product_id, r2rm_addr):
        """
        Args:
            parent:      The parent EddRoach2MasterController instance
            product_id:  A unique identifier for this product
            r2rm_addr:   The address of the R2RM (ROACH2 resource manager)
                         to be used by this product. Passed in tuple
                         format, e.g. ("127.0.0.1", 5000)
        """
        super(EddRoach2ProductController, self).__init__(parent, product_id)
        # KATCP client used for all requests to the R2RM server.
        self._r2rm_client = KATCPClientResource(
            dict(name="r2rm-client", address=r2rm_addr, controlled=True))
        self._r2rm_client.start()
        # Populated by configure(), cleared again by deconfigure().
        self._firmware = None
        self._icom_id = None

    def setup_sensors(self):
        """
        Setup the default KATCP sensors.

        Note: As this call is made only upon an EDD product configure call
        a mass inform is required to let connected clients know that the
        proxy interface has changed.
        """
        super(EddRoach2ProductController, self).setup_sensors()
        self._firmware_server_sensor = Sensor.string(
            "firmware-server",
            description="The address of the firmware server started by this product",
            default="",
            initial_status=Sensor.UNKNOWN)
        self.add_sensor(self._firmware_server_sensor)
        # Notify connected clients that the sensor interface has changed.
        self._parent.mass_inform(Message.inform('interface-changed'))

    @state_change(["capturing", "error"], "idle")
    @coroutine
    def deconfigure(self):
        """
        Deconfigure the product

        This method will remove any product sensors that were added to the
        parent master controller.

        Raises:
            EddRoach2ProductError: if the R2RM server replies with an error
        """
        yield self._r2rm_client.until_synced(2)
        response = yield self._r2rm_client.req.force_deconfigure_board(
            self._icom_id)
        if not response.reply.reply_ok():
            self.log.error("Error on deconfigure request: {}".format(
                response.reply.arguments[1]))
            raise EddRoach2ProductError(response.reply.arguments[1])
        self.teardown_sensors()
        self._firmware = None
        self._icom_id = None

    @state_change(["idle", "error"], "capturing", "preparing")
    @coroutine
    def configure(self, config):
        """
        Configure the roach2 product

        Args:
            config: A dictionary containing configuration information.
                    The dictionary should have a form similar to::

                        {
                            "id": "roach2_spectrometer",
                            "type": "roach2",
                            "icom_id": "R2-EDD",
                            "firmware": "EDDFirmware",
                            "commands": [
                                ["program", []],
                                ["start", []],
                                ["set_integration_period", [1000.0]],
                                ["set_destination_address",
                                 ["10.10.1.12", 60001]]
                            ]
                        }

        This method will request the specified roach2 board from the R2RM
        server and request a firmware deployment. The values of the
        'icom_id' and 'firmware' must correspond to valid managed roach2
        boards and firmwares as understood by the R2RM server.

        Raises:
            EddRoach2ProductError: if board configuration or any firmware
                command replies with an error
        """
        log.debug("Syncing with R2RM server")
        yield self._r2rm_client.until_synced(2)
        self._icom_id = config["icom_id"]
        self._firmware = config["firmware"]
        log.debug("Trying to force deconfiguring board")
        # Best-effort: an error here is logged but does not abort the
        # configure sequence.
        response = yield self._r2rm_client.req.force_deconfigure_board(
            self._icom_id)
        if not response.reply.reply_ok():
            self.log.warning("Unable to deconfigure ROACH2 board: {}".format(
                response.reply.arguments[1]))
        log.debug("Sending configure request to R2RM server")
        response = yield self._r2rm_client.req.configure_board(
            self._icom_id, EDD_R2RM_USER, self._firmware, timeout=20)
        if not response.reply.reply_ok():
            self.log.error("Error on configure request: {}".format(
                response.reply.arguments[1]))
            raise EddRoach2ProductError(response.reply.arguments[1])
        # NOTE(review): KATCP reply arguments arrive as strings;
        # firmware_port is presumably accepted as-is by
        # KATCPClientResource -- confirm it does not require int().
        _, firmware_ip, firmware_port = response.reply.arguments
        log.debug(
            "Connecting client to activated firmware server @ {}:{}".format(
                firmware_ip, firmware_port))
        firmware_client = KATCPClientResource(
            dict(name="firmware-client",
                 address=(firmware_ip, firmware_port),
                 controlled=True))
        firmware_client.start()
        log.debug("Syncing with firmware client")
        yield firmware_client.until_synced(2)
        # Replay the configured command list against the firmware server
        # in order; abort on the first error reply.
        for command, args in config["commands"]:
            log.debug(
                "Sending firmware server request '{}' with args '{}'".format(
                    command, args))
            response = yield firmware_client.req[command](*args, timeout=20)
            if not response.reply.reply_ok():
                self.log.error("Error on {}->{} request: {}".format(
                    command, args, response.reply.arguments[1]))
                raise EddRoach2ProductError(response.reply.arguments[1])
        log.debug("Stopping client connection to firmware server")
        firmware_client.stop()

    @coroutine
    def capture_start(self):
        """
        A no-op method for supporting the product controller interface.
        """
        pass

    @coroutine
    def capture_stop(self):
        """
        A no-op method for supporting the product controller interface.
        """
        pass
def configure(self, config):
    """
    Configure the roach2 product

    Args:
        config: A dictionary containing configuration information.
                The dictionary should have a form similar to::

                    {
                        "id": "roach2_spectrometer",
                        "type": "roach2",
                        "icom_id": "R2-EDD",
                        "firmware": "EDDFirmware",
                        "commands": [
                            ["program", []],
                            ["start", []],
                            ["set_integration_period", [1000.0]],
                            ["set_destination_address",
                             ["10.10.1.12", 60001]]
                        ]
                    }

    This method will request the specified roach2 board from the R2RM
    server and request a firmware deployment. The values of the 'icom_id'
    and 'firmware' must correspond to valid managed roach2 boards and
    firmwares as understood by the R2RM server.

    Raises:
        EddRoach2ProductError: if board configuration or any firmware
            command replies with an error
    """
    log.debug("Syncing with R2RM server")
    yield self._r2rm_client.until_synced(2)
    self._icom_id = config["icom_id"]
    self._firmware = config["firmware"]
    log.debug("Trying to force deconfiguring board")
    # Best-effort: an error here is logged but does not abort the
    # configure sequence.
    response = yield self._r2rm_client.req.force_deconfigure_board(
        self._icom_id)
    if not response.reply.reply_ok():
        self.log.warning("Unable to deconfigure ROACH2 board: {}".format(
            response.reply.arguments[1]))
    log.debug("Sending configure request to R2RM server")
    response = yield self._r2rm_client.req.configure_board(
        self._icom_id, EDD_R2RM_USER, self._firmware, timeout=20)
    if not response.reply.reply_ok():
        self.log.error("Error on configure request: {}".format(
            response.reply.arguments[1]))
        raise EddRoach2ProductError(response.reply.arguments[1])
    # NOTE(review): KATCP reply arguments arrive as strings;
    # firmware_port is presumably accepted as-is by KATCPClientResource
    # -- confirm it does not require int().
    _, firmware_ip, firmware_port = response.reply.arguments
    log.debug(
        "Connecting client to activated firmware server @ {}:{}".format(
            firmware_ip, firmware_port))
    firmware_client = KATCPClientResource(
        dict(name="firmware-client",
             address=(firmware_ip, firmware_port),
             controlled=True))
    firmware_client.start()
    log.debug("Syncing with firmware client")
    yield firmware_client.until_synced(2)
    # Replay the configured command list against the firmware server in
    # order; abort on the first error reply.
    for command, args in config["commands"]:
        log.debug(
            "Sending firmware server request '{}' with args '{}'".format(
                command, args))
        response = yield firmware_client.req[command](*args, timeout=20)
        if not response.reply.reply_ok():
            self.log.error("Error on {}->{} request: {}".format(
                command, args, response.reply.arguments[1]))
            raise EddRoach2ProductError(response.reply.arguments[1])
    log.debug("Stopping client connection to firmware server")
    firmware_client.stop()
class FbfProductController(object): """ Wrapper class for an FBFUSE product. """ STATES = ["idle", "preparing", "ready", "starting", "capturing", "stopping", "error"] IDLE, PREPARING, READY, STARTING, CAPTURING, STOPPING, ERROR = STATES def __init__(self, parent, product_id, katpoint_antennas, n_channels, feng_streams, proxy_name, feng_config): """ @brief Construct new instance @param parent The parent FbfMasterController instance @param product_id The name of the product @param katpoint_antennas A list of katpoint.Antenna objects @param n_channels The integer number of frequency channels provided by the CBF. @param feng_streams A string describing the multicast groups containing F-enging data (in the form: spead://239.11.1.150+15:7147) @param proxy_name The name of the proxy associated with this subarray (used as a sensor prefix) #NEED FENG CONFIG @param servers A list of FbfWorkerServer instances allocated to this product controller """ self.log = logging.getLogger("mpikat.fbfuse_product_controller.{}".format(product_id)) self.log.debug("Creating new FbfProductController with args: {}".format( ", ".join([str(i) for i in (parent, product_id, katpoint_antennas, n_channels, feng_streams, proxy_name, feng_config)]))) self._parent = parent self._product_id = product_id self._antennas = ",".join([a.name for a in katpoint_antennas]) self._katpoint_antennas = katpoint_antennas self._antenna_map = {a.name: a for a in self._katpoint_antennas} self._n_channels = n_channels self._streams = ip_range_from_stream(feng_streams) self._proxy_name = proxy_name self._feng_config = feng_config self._servers = [] self._beam_manager = None self._delay_config_server = None self._ca_client = None self._previous_sb_config = None self._managed_sensors = [] self._ibc_mcast_group = None self._cbc_mcast_groups = None self._default_sb_config = { u'coherent-beams-nbeams':400, u'coherent-beams-tscrunch':16, u'coherent-beams-fscrunch':1, u'coherent-beams-antennas':self._antennas, 
u'coherent-beams-granularity':6, u'incoherent-beam-tscrunch':16, u'incoherent-beam-fscrunch':1, u'incoherent-beam-antennas':self._antennas, u'bandwidth':self._feng_config['bandwidth'], u'centre-frequency':self._feng_config['centre-frequency']} self.setup_sensors() def __del__(self): self.teardown_sensors() def info(self): """ @brief Return a metadata dictionary describing this product controller """ out = { "antennas":self._antennas, "nservers":len(self.servers), "capturing":self.capturing, "streams":self._streams, "nchannels":self._n_channels, "proxy_name":self._proxy_name } return out def add_sensor(self, sensor): """ @brief Add a sensor to the parent object @note This method is used to wrap calls to the add_sensor method on the parent FbfMasterController instance. In order to disambiguate between sensors from describing different products the associated proxy name is used as sensor prefix. For example the "servers" sensor will be seen by clients connected to the FbfMasterController server as "<proxy_name>-servers" (e.g. "FBFUSE_1-servers"). """ prefix = "{}.".format(self._product_id) if sensor.name.startswith(prefix): self._parent.add_sensor(sensor) else: sensor.name = "{}{}".format(prefix,sensor.name) self._parent.add_sensor(sensor) self._managed_sensors.append(sensor) def setup_sensors(self): """ @brief Setup the default KATCP sensors. @note As this call is made only upon an FBFUSE configure call a mass inform is required to let connected clients know that the proxy interface has changed. 
""" self._state_sensor = LoggingSensor.discrete( "state", description = "Denotes the state of this FBF instance", params = self.STATES, default = self.IDLE, initial_status = Sensor.NOMINAL) self._state_sensor.set_logger(self.log) self.add_sensor(self._state_sensor) self._ca_address_sensor = Sensor.string( "configuration-authority", description = "The address of the server that will be deferred to for configurations", default = "", initial_status = Sensor.UNKNOWN) self.add_sensor(self._ca_address_sensor) self._available_antennas_sensor = Sensor.string( "available-antennas", description = "The antennas that are currently available for beamforming", default = json.dumps({antenna.name:antenna.format_katcp() for antenna in self._katpoint_antennas}), initial_status = Sensor.NOMINAL) self.add_sensor(self._available_antennas_sensor) self._phase_reference_sensor = Sensor.string( "phase-reference", description="A KATPOINT target string denoting the F-engine phasing centre", default="unset,radec,0,0", initial_status=Sensor.UNKNOWN) self.add_sensor(self._phase_reference_sensor) reference_antenna = Antenna("reference,{ref.lat},{ref.lon},{ref.elev}".format( ref=self._katpoint_antennas[0].ref_observer)) self._reference_antenna_sensor = Sensor.string( "reference-antenna", description="A KATPOINT antenna string denoting the reference antenna", default=reference_antenna.format_katcp(), initial_status=Sensor.NOMINAL) self.add_sensor(self._reference_antenna_sensor) self._bandwidth_sensor = Sensor.float( "bandwidth", description = "The bandwidth this product is configured to process", default = self._default_sb_config['bandwidth'], initial_status = Sensor.UNKNOWN) self.add_sensor(self._bandwidth_sensor) self._nchans_sensor = Sensor.integer( "nchannels", description = "The number of channels to be processesed", default = self._n_channels, initial_status = Sensor.UNKNOWN) self.add_sensor(self._nchans_sensor) self._cfreq_sensor = Sensor.float( "centre-frequency", description = "The centre 
frequency of the band this product configured to process", default = self._default_sb_config['centre-frequency'], initial_status = Sensor.UNKNOWN) self.add_sensor(self._cfreq_sensor) self._cbc_nbeams_sensor = Sensor.integer( "coherent-beam-count", description = "The number of coherent beams that this FBF instance can currently produce", default = self._default_sb_config['coherent-beams-nbeams'], initial_status = Sensor.UNKNOWN) self.add_sensor(self._cbc_nbeams_sensor) self._cbc_nbeams_per_group = Sensor.integer( "coherent-beam-count-per-group", description = "The number of coherent beams packed into a multicast group", default = 1, initial_status = Sensor.UNKNOWN) self.add_sensor(self._cbc_nbeams_per_group) self._cbc_ngroups = Sensor.integer( "coherent-beam-ngroups", description = "The number of multicast groups used for coherent beam transmission", default = 1, initial_status = Sensor.UNKNOWN) self.add_sensor(self._cbc_ngroups) self._cbc_nbeams_per_server_set = Sensor.integer( "coherent-beam-nbeams-per-server-set", description = "The number of beams produced by each server set", default = 1, initial_status = Sensor.UNKNOWN) self.add_sensor(self._cbc_nbeams_per_server_set) self._cbc_tscrunch_sensor = Sensor.integer( "coherent-beam-tscrunch", description = "The number time samples that will be integrated when producing coherent beams", default = self._default_sb_config['coherent-beams-tscrunch'], initial_status = Sensor.UNKNOWN) self.add_sensor(self._cbc_tscrunch_sensor) self._cbc_fscrunch_sensor = Sensor.integer( "coherent-beam-fscrunch", description = "The number frequency channels that will be integrated when producing coherent beams", default = self._default_sb_config['coherent-beams-fscrunch'], initial_status = Sensor.UNKNOWN) self.add_sensor(self._cbc_fscrunch_sensor) self._cbc_antennas_sensor = Sensor.string( "coherent-beam-antennas", description = "The antennas that will be used when producing coherent beams", default = 
self._default_sb_config['coherent-beams-antennas'], initial_status = Sensor.UNKNOWN) self.add_sensor(self._cbc_antennas_sensor) self._cbc_mcast_groups_sensor = Sensor.string( "coherent-beam-multicast-groups", description = "Multicast groups used by this instance for sending coherent beam data", default = "", initial_status = Sensor.UNKNOWN) self.add_sensor(self._cbc_mcast_groups_sensor) self._cbc_mcast_groups_mapping_sensor = Sensor.string( "coherent-beam-multicast-group-mapping", description = "Mapping of mutlicast group address to the coherent beams in that group", default= "", initial_status = Sensor.UNKNOWN) self.add_sensor(self._cbc_mcast_groups_mapping_sensor) self._ibc_nbeams_sensor = Sensor.integer( "incoherent-beam-count", description = "The number of incoherent beams that this FBF instance can currently produce", default = 1, initial_status = Sensor.UNKNOWN) self.add_sensor(self._ibc_nbeams_sensor) self._ibc_tscrunch_sensor = Sensor.integer( "incoherent-beam-tscrunch", description = "The number time samples that will be integrated when producing incoherent beams", default = self._default_sb_config['incoherent-beam-tscrunch'], initial_status = Sensor.UNKNOWN) self.add_sensor(self._ibc_tscrunch_sensor) self._ibc_fscrunch_sensor = Sensor.integer( "incoherent-beam-fscrunch", description = "The number frequency channels that will be integrated when producing incoherent beams", default = self._default_sb_config['incoherent-beam-fscrunch'], initial_status = Sensor.UNKNOWN) self.add_sensor(self._ibc_fscrunch_sensor) self._ibc_antennas_sensor = Sensor.string( "incoherent-beam-antennas", description = "The antennas that will be used when producing incoherent beams", default = self._default_sb_config['incoherent-beam-antennas'], initial_status = Sensor.UNKNOWN) self.add_sensor(self._ibc_antennas_sensor) self._ibc_mcast_group_sensor = Sensor.string( "incoherent-beam-multicast-group", description = "Multicast group used by this instance for sending incoherent beam 
data", default = "", initial_status = Sensor.UNKNOWN) self.add_sensor(self._ibc_mcast_group_sensor) self._servers_sensor = Sensor.string( "servers", description = "The worker server instances currently allocated to this product", default = ",".join(["{s.hostname}:{s.port}".format(s=server) for server in self._servers]), initial_status = Sensor.UNKNOWN) self.add_sensor(self._servers_sensor) self._nserver_sets_sensor = Sensor.integer( "nserver-sets", description = "The number of server sets (independent subscriptions to the F-engines)", default = 1, initial_status = Sensor.UNKNOWN) self.add_sensor(self._nserver_sets_sensor) self._nservers_per_set_sensor = Sensor.integer( "nservers-per-set", description = "The number of servers per server set", default = 1, initial_status = Sensor.UNKNOWN) self.add_sensor(self._nservers_per_set_sensor) self._delay_config_server_sensor = Sensor.string( "delay-config-server", description = "The address of the delay configuration server for this product", default = "", initial_status = Sensor.UNKNOWN) self.add_sensor(self._delay_config_server_sensor) def teardown_sensors(self): """ @brief Remove all sensors created by this product from the parent server. @note This method is required for cleanup to stop the FBF sensor pool becoming swamped with unused sensors. 
""" for sensor in self._managed_sensors: self._parent.remove_sensor(sensor) self._managed_sensors = [] self._parent.mass_inform(Message.inform('interface-changed')) @property def servers(self): return self._servers @property def capturing(self): return self.state == self.CAPTURING @property def idle(self): return self.state == self.IDLE @property def starting(self): return self.state == self.STARTING @property def stopping(self): return self.state == self.STOPPING @property def ready(self): return self.state == self.READY @property def preparing(self): return self.state == self.PREPARING @property def error(self): return self.state == self.ERROR @property def state(self): return self._state_sensor.value() def _verify_antennas(self, antennas): """ @brief Verify that a set of antennas is available to this instance. @param antennas A CSV list of antenna names """ self.log.debug("Verifying antenna set: {}".format(antennas)) antennas_set = set([ant.name for ant in self._katpoint_antennas]) requested_antennas = set(antennas) return requested_antennas.issubset(antennas_set) def set_configuration_authority(self, hostname, port): if self._ca_client: self._ca_client.stop() self._ca_client = KATCPClientResource(dict( name = 'configuration-authority-client', address = (hostname, port), controlled = True)) self._ca_client.start() self._ca_address_sensor.set_value("{}:{}".format(hostname, port)) @coroutine def get_ca_sb_configuration(self, sb_id): self.log.debug("Retrieving schedule block configuration from configuration authority") yield self._ca_client.until_synced() try: response = yield self._ca_client.req.get_schedule_block_configuration(self._proxy_name, sb_id) except Exception as error: self.log.error("Request for SB configuration to CA failed with error: {}".format(str(error))) raise error try: config_dict = json.loads(response.reply.arguments[1]) except Exception as error: self.log.error("Could not parse CA SB configuration with error: {}".format(str(error))) raise 
error self.log.debug("Configuration authority returned: {}".format(config_dict)) raise Return(config_dict) def reset_sb_configuration(self): self.log.debug("Reseting schedule block configuration") try: self.capture_stop() except Exception as error: self.log.warning("Received error while attempting capture stop: {}".format(str(error))) self._parent._server_pool.deallocate(self._servers) if self._ibc_mcast_group: self._parent._ip_pool.free(self._ibc_mcast_group) if self._cbc_mcast_groups: self._parent._ip_pool.free(self._cbc_mcast_groups) self._cbc_mcast_groups = None self._ibc_mcast_group = None self._servers = [] if self._delay_config_server: self._delay_config_server.stop() self._delay_config_server = None self._beam_manager = None def set_error_state(self, message): self.reset_sb_configuration() self._state_sensor.set_value(self.ERROR) def set_sb_configuration(self, config_dict): """ @brief Set the schedule block configuration for this product @param config_dict A dictionary specifying configuation parameters, e.g. 
@code { u'coherent-beams-nbeams':100, u'coherent-beams-tscrunch':22, u'coherent-beams-fscrunch':2, u'coherent-beams-antennas':'m007', u'coherent-beams-granularity':6, u'incoherent-beam-tscrunch':16, u'incoherent-beam-fscrunch':1, u'incoherent-beam-antennas':'m008' } @endcode @detail Valid parameters for the configuration dictionary are as follows: coherent-beams-nbeams - The desired number of coherent beams to produce coherent-beams-tscrunch - The number of spectra to integrate in the coherent beamformer coherent-beams-tscrunch - The number of spectra to integrate in the coherent beamformer coherent-beams-fscrunch - The number of channels to integrate in the coherent beamformer coherent-beams-antennas - The specific antennas to use for the coherent beamformer coherent-beams-granularity - The number of beams per output mutlicast group (an integer divisor or multiplier of this number will be used) incoherent-beam-tscrunch - The number of spectra to integrate in the incoherent beamformer incoherent-beam-fscrunch - The number of channels to integrate in the incoherent beamformer incoherent-beam-antennas - The specific antennas to use for the incoherent beamformer centre-frequency - The desired centre frequency in Hz bandwidth - The desired bandwidth in Hz @note FBFUSE reasonably assumes that the user does not know the possible configurations at any given time. As such it tries to satisfy the users request but will not throw an error if the requested configuration is not acheivable, instead opting to provide a reduced configuration. For example the user may request 1000 beams and 6 beams per multicast group but FBFUSE may configure to produce 860 beams and 24 beams per multicast group. If the user can only use 6 beams per multcast group, then in the 24-beam case they must subscribe to the same multicast group 4 times on different nodes. 
""" if self._previous_sb_config == config_dict: self.log.info("Configuration is unchanged, proceeding with existing configuration") return else: self._previous_sb_config = config_dict self.reset_sb_configuration() self.log.info("Setting schedule block configuration") config = deepcopy(self._default_sb_config) config.update(config_dict) self.log.info("Configuring using: {}".format(config)) requested_cbc_antenna = parse_csv_antennas(config['coherent-beams-antennas']) if not self._verify_antennas(requested_cbc_antenna): raise Exception("Requested coherent beam antennas are not a subset of the available antennas") requested_ibc_antenna = parse_csv_antennas(config['incoherent-beam-antennas']) if not self._verify_antennas(requested_ibc_antenna): raise Exception("Requested incoherent beam antennas are not a subset of the available antennas") # first we need to get one ip address for the incoherent beam self._ibc_mcast_group = self._parent._ip_pool.allocate(1) self._ibc_mcast_group_sensor.set_value(self._ibc_mcast_group.format_katcp()) largest_ip_range = self._parent._ip_pool.largest_free_range() nworkers_available = self._parent._server_pool.navailable() cm = FbfConfigurationManager(len(self._katpoint_antennas), self._feng_config['bandwidth'], self._n_channels, nworkers_available, largest_ip_range) requested_nantennas = len(parse_csv_antennas(config['coherent-beams-antennas'])) mcast_config = cm.get_configuration( config['coherent-beams-tscrunch'], config['coherent-beams-fscrunch'], config['coherent-beams-nbeams'], requested_nantennas, config['bandwidth'], config['coherent-beams-granularity']) self._bandwidth_sensor.set_value(config['bandwidth']) self._cfreq_sensor.set_value(config['centre-frequency']) self._nchans_sensor.set_value(mcast_config['num_chans']) self._cbc_nbeams_sensor.set_value(mcast_config['num_beams']) self._cbc_nbeams_per_group.set_value(mcast_config['num_beams_per_mcast_group']) self._cbc_ngroups.set_value(mcast_config['num_mcast_groups']) 
self._cbc_nbeams_per_server_set.set_value(mcast_config['num_beams_per_worker_set']) self._cbc_tscrunch_sensor.set_value(config['coherent-beams-tscrunch']) self._cbc_fscrunch_sensor.set_value(config['coherent-beams-fscrunch']) self._cbc_antennas_sensor.set_value(config['coherent-beams-antennas']) self._ibc_tscrunch_sensor.set_value(config['incoherent-beam-tscrunch']) self._ibc_fscrunch_sensor.set_value(config['incoherent-beam-fscrunch']) self._ibc_antennas_sensor.set_value(config['incoherent-beam-antennas']) self._servers = self._parent._server_pool.allocate(mcast_config['num_workers_total']) server_str = ",".join(["{s.hostname}:{s.port}".format(s=server) for server in self._servers]) self._servers_sensor.set_value(server_str) self._nserver_sets_sensor.set_value(mcast_config['num_worker_sets']) self._nservers_per_set_sensor.set_value(mcast_config['num_workers_per_set']) self._cbc_mcast_groups = self._parent._ip_pool.allocate(mcast_config['num_mcast_groups']) self._cbc_mcast_groups_sensor.set_value(self._cbc_mcast_groups.format_katcp()) return cm @coroutine def get_ca_target_configuration(self, target): def ca_target_update_callback(received_timestamp, timestamp, status, value): # TODO, should we really reset all the beams or should we have # a mechanism to only update changed beams config_dict = json.loads(value) self.reset_beams() for target_string in config_dict.get('beams',[]): target = Target(target_string) self.add_beam(target) for tiling in config_dict.get('tilings',[]): target = Target(tiling['target']) #required freq = float(tiling.get('reference_frequency', self._cfreq_sensor.value())) nbeams = int(tiling['nbeams']) overlap = float(tiling.get('overlap', 0.5)) epoch = float(tiling.get('epoch', time.time())) self.add_tiling(target, nbeams, freq, overlap, epoch) yield self._ca_client.until_synced() try: response = yield self._ca_client.req.target_configuration_start(self._proxy_name, target.format_katcp()) except Exception as error: self.log.error("Request for 
target configuration to CA failed with error: {}".format(str(error))) raise error if not response.reply.reply_ok(): error = Exception(response.reply.arguments[1]) self.log.error("Request for target configuration to CA failed with error: {}".format(str(error))) raise error yield self._ca_client.until_synced() sensor = self._ca_client.sensor["{}_beam_position_configuration".format(self._proxy_name)] sensor.register_listener(ca_target_update_callback) self._ca_client.set_sampling_strategy(sensor.name, "event") def _beam_to_sensor_string(self, beam): return beam.target.format_katcp() @coroutine def target_start(self, target): self._phase_reference_sensor.set_value(target) if self._ca_client: yield self.get_ca_target_configuration(target) else: self.log.warning("No configuration authority is set, using default beam configuration") @coroutine def target_stop(self): if self._ca_client: sensor_name = "{}_beam_position_configuration".format(self._proxy_name) self._ca_client.set_sampling_strategy(sensor_name, "none") @coroutine def prepare(self, sb_id): """ @brief Prepare the beamformer for streaming @detail This method evaluates the current configuration creates a new DelayEngine and passes a prepare call to all allocated servers. """ if not self.idle: raise FbfProductStateError([self.IDLE], self.state) self.log.info("Preparing FBFUSE product") self._state_sensor.set_value(self.PREPARING) self.log.debug("Product moved to 'preparing' state") # Here we need to parse the streams and assign beams to streams: #mcast_addrs, mcast_port = parse_stream(self._streams['cbf.antenna_channelised_voltage']['i0.antenna-channelised-voltage']) if not self._ca_client: self.log.warning("No configuration authority found, using default configuration parameters") cm = self.set_sb_configuration(self._default_sb_config) else: #TODO: get the schedule block ID into this call from somewhere (configure?) 
try: config = yield self.get_ca_sb_configuration(sb_id) cm = self.set_sb_configuration(config) except Exception as error: self.log.error("Configuring from CA failed with error: {}".format(str(error))) self.log.warning("Reverting to default configuration") cm = self.set_sb_configuration(self._default_sb_config) cbc_antennas_names = parse_csv_antennas(self._cbc_antennas_sensor.value()) cbc_antennas = [self._antenna_map[name] for name in cbc_antennas_names] self._beam_manager = BeamManager(self._cbc_nbeams_sensor.value(), cbc_antennas) self._delay_config_server = DelayConfigurationServer("127.0.0.1", 0, self._beam_manager) self._delay_config_server.start() self.log.info("Started delay engine at: {}".format(self._delay_config_server.bind_address)) de_ip, de_port = self._delay_config_server.bind_address self._delay_config_server_sensor.set_value((de_ip, de_port)) # Need to tear down the beam sensors here # Here calculate the beam to multicast map self._beam_sensors = [] mcast_to_beam_map = {} groups = [ip for ip in self._cbc_mcast_groups] idxs = [beam.idx for beam in self._beam_manager.get_beams()] for group in groups: self.log.debug("Allocating beams to {}".format(str(group))) key = str(group) for _ in range(self._cbc_nbeams_per_group.value()): if not key in mcast_to_beam_map: mcast_to_beam_map[str(group)] = [] value = idxs.pop(0) self.log.debug("--> Allocated {} to {}".format(value, str(group))) mcast_to_beam_map[str(group)].append(value) self._cbc_mcast_groups_mapping_sensor.set_value(json.dumps(mcast_to_beam_map)) for beam in self._beam_manager.get_beams(): sensor = Sensor.string( "coherent-beam-{}".format(beam.idx), description="R.A. 
(deg), declination (deg) and source name for coherent beam with ID {}".format(beam.idx), default=self._beam_to_sensor_string(beam), initial_status=Sensor.UNKNOWN) beam.register_observer(lambda beam, sensor=sensor: sensor.set_value(self._beam_to_sensor_string(beam))) self._beam_sensors.append(sensor) self.add_sensor(sensor) self._parent.mass_inform(Message.inform('interface-changed')) #Here we actually start to prepare the remote workers ip_splits = self._streams.split(N_FENG_STREAMS_PER_WORKER) # This is assuming lower sideband and bandwidth is always +ve fbottom = self._feng_config['centre-frequency'] - self._feng_config['bandwidth']/2. coherent_beam_config = { 'tscrunch':self._cbc_tscrunch_sensor.value(), 'fscrunch':self._cbc_fscrunch_sensor.value(), 'antennas':self._cbc_antennas_sensor.value() } incoherent_beam_config = { 'tscrunch':self._ibc_tscrunch_sensor.value(), 'fscrunch':self._ibc_fscrunch_sensor.value(), 'antennas':self._ibc_antennas_sensor.value() } prepare_futures = [] for ii, (server, ip_range) in enumerate(zip(self._servers, ip_splits)): chan0_idx = cm.nchans_per_worker * ii chan0_freq = fbottom + chan0_idx * cm.channel_bandwidth future = server.prepare(ip_range.format_katcp(), cm.nchans_per_group, chan0_idx, chan0_freq, cm.channel_bandwidth, mcast_to_beam_map, self._feng_config['feng-antenna-map'], coherent_beam_config, incoherent_beam_config, de_ip, de_port) prepare_futures.append(future) failure_count = 0 for future in prepare_futures: try: yield future except Exception as error: log.error("Failed to configure server with error: {}".format(str(error))) failure_count += 1 if failure_count > 0: self._state_sensor.set_value(self.ERROR) self.log.info("Failed to prepare FBFUSE product") else: self._state_sensor.set_value(self.READY) self.log.info("Successfully prepared FBFUSE product") def deconfigure(self): """ @brief Deconfigure the product. To be called on a subarray deconfigure. 
@detail This is the final cleanup operation for the product, it should delete all sensors and ensure the release of all resource allocations. """ self.reset_sb_configuration() self.teardown_sensors() def capture_start(self): if not self.ready: raise FbfProductStateError([self.READY], self.state) self._state_sensor.set_value(self.STARTING) self.log.debug("Product moved to 'starting' state") """ futures = [] for server in self._servers: futures.append(server.req.start_capture()) for future in futures: try: response = yield future except: pass """ self._state_sensor.set_value(self.CAPTURING) self.log.debug("Product moved to 'capturing' state") def capture_stop(self): """ @brief Stops the beamformer servers streaming. @detail This should only be called on a schedule block reconfiguration if the same configuration persists between schedule blocks then it is preferable to continue streaming rather than stopping and starting again. """ if not self.capturing and not self.error: return self._state_sensor.set_value(self.STOPPING) self.target_stop() for server in self._servers: #yield server.req.deconfigure() pass self._state_sensor.set_value(self.IDLE) def add_beam(self, target): """ @brief Specify the parameters of one managed beam @param target A KATPOINT target object @return Returns the allocated Beam object """ valid_states = [self.READY, self.CAPTURING, self.STARTING] if not self.state in valid_states: raise FbfProductStateError(valid_states, self.state) return self._beam_manager.add_beam(target) def add_tiling(self, target, number_of_beams, reference_frequency, overlap, epoch): """ @brief Add a tiling to be managed @param target A KATPOINT target object @param reference_frequency The reference frequency at which to calculate the synthesised beam shape, and thus the tiling pattern. Typically this would be chosen to be the centre frequency of the current observation. @param overlap The desired overlap point between beams in the pattern. 
The overlap defines at what power point neighbouring beams in the tiling pattern will meet. For example an overlap point of 0.1 corresponds to beams overlapping only at their 10%-power points. Similarly a overlap of 0.5 corresponds to beams overlapping at their half-power points. [Note: This is currently a tricky parameter to use when values are close to zero. In future this may be define in sigma units or in multiples of the FWHM of the beam.] @returns The created Tiling object """ valid_states = [self.READY, self.CAPTURING, self.STARTING] if not self.state in valid_states: raise FbfProductStateError(valid_states, self.state) tiling = self._beam_manager.add_tiling(target, number_of_beams, reference_frequency, overlap) try: tiling.generate(self._katpoint_antennas, epoch) except Exception as error: self.log.error("Failed to generate tiling pattern with error: {}".format(str(error))) return tiling def reset_beams(self): """ @brief reset and deallocate all beams and tilings managed by this instance @note All tiling will be lost on this call and must be remade for subsequent observations """ valid_states = [self.READY, self.CAPTURING, self.STARTING] if not self.state in valid_states: raise FbfProductStateError(valid_states, self.state) self._beam_manager.reset()
    def setup(self, subarray_size, antennas_csv, nbeams, tot_nchans,
              feng_groups, chan0_idx, worker_idx):
        """
        Configure a local delay-configuration server and drive a single
        worker server through prepare and capture-start.

        :param subarray_size: Number of antennas in the subarray (used to
               derive channels per multicast group)
        :param antennas_csv:  CSV list of antenna names for the coherent beams
        :param nbeams:        Number of coherent beams to form
        :param tot_nchans:    Total number of F-engine channels
        :param feng_groups:   Composite spead stream definition for the
               F-engine multicast groups handled by this worker
        :param chan0_idx:     Index of the first channel handled by this worker
        :param worker_idx:    Worker index
               NOTE(review): `worker_idx` is currently unused — confirm.

        NOTE(review): this method is a generator (uses `yield` on futures);
        presumably it is decorated with @coroutine on a line above this
        chunk — verify.
        """
        cbc_antennas_names = parse_csv_antennas(antennas_csv)
        cbc_antennas = [Antenna(ANTENNAS[name]) for name in cbc_antennas_names]
        self._beam_manager = BeamManager(nbeams, cbc_antennas)
        # Port 0: the delay configuration server picks an ephemeral port.
        self._delay_config_server = DelayConfigurationServer(
            "127.0.0.1", 0, self._beam_manager)
        self._delay_config_server.start()
        antennas_json = self._delay_config_server._antennas_sensor.value()
        antennas = json.loads(antennas_json)
        coherent_beams = ["cfbf{:05d}".format(ii) for ii in range(nbeams)]
        coherent_beams_csv = ",".join(coherent_beams)
        feng_antenna_map = {antenna: ii for ii, antenna in enumerate(antennas)}
        coherent_beam_antennas = antennas
        incoherent_beam_antennas = antennas
        nantennas = len(antennas)
        # NOTE(review): Python 2 integer division — assumes tot_nchans is a
        # multiple of (subarray_size * 4); confirm.
        nchans_per_group = tot_nchans / subarray_size / 4
        nchans = ip_range_from_stream(feng_groups).count * nchans_per_group
        chan0_freq = 1240e6
        chan_bw = 856e6 / tot_nchans
        # Incoherent beam on its own fixed group; coherent beams packed 4 per
        # group. NOTE(review): coherent groups use 239.11.1.0-7 while the
        # incoherent beam sits at 239.11.1.150 — confirm these base addresses
        # are intended (hard-coded for a fixed 32-beam test layout).
        mcast_to_beam_map = {"spead://239.11.1.150:7148": "ifbf00001"}
        for ii in range(8):
            mcast_to_beam_map["spead://239.11.1.{}:7148".format(
                ii)] = ",".join(coherent_beams[4 * ii:4 * (ii + 1)])
        feng_config = {
            "bandwidth": 856e6,
            "centre-frequency": 1200e6,
            "sideband": "upper",
            "feng-antenna-map": feng_antenna_map,
            "sync-epoch": 1554907897.0,
            "nchans": tot_nchans
        }
        coherent_beam_config = {
            "tscrunch": 16,
            "fscrunch": 1,
            "antennas": ",".join(coherent_beam_antennas)
        }
        incoherent_beam_config = {
            "tscrunch": 16,
            "fscrunch": 1,
            "antennas": ",".join(incoherent_beam_antennas)
        }
        worker_client = KATCPClientResource(
            dict(name="worker-server-client",
                 address=self._worker_server.bind_address,
                 controlled=True))
        yield worker_client.start()
        yield worker_client.until_synced()
        print "preparing"
        # Long timeout: worker prepare includes DADA buffer/firmware setup.
        response = yield worker_client.req.prepare(
            feng_groups, nchans_per_group, chan0_idx, chan0_freq,
            chan_bw, nbeams, json.dumps(mcast_to_beam_map),
            json.dumps(feng_config),
            json.dumps(coherent_beam_config),
            json.dumps(incoherent_beam_config),
            *self._delay_config_server.bind_address, timeout=300.0)
        if not response.reply.reply_ok():
            raise Exception("Error on prepare: {}".format(
                response.reply.arguments))
        else:
            print "prepare done"
        yield worker_client.req.capture_start()
class WorkerWrapper(object):
    """Wrapper around a client to an FbfWorkerServer instance."""

    def __init__(self, hostname, port):
        """
        @brief  Create a new wrapper around a client to a worker server

        @params hostname The hostname for the worker server
        @params port     The port number that the worker server serves on
        """
        log.debug("Creating worker client to worker at {}:{}".format(
            hostname, port))
        self._client = KATCPClientResource(
            dict(name="worker-server-client",
                 address=(hostname, port),
                 controlled=True))
        self.hostname = hostname
        self.port = port
        self.priority = 0  # Currently no priority mechanism is implemented
        self._started = False

    @coroutine
    def get_sensor_value(self, sensor_name):
        """
        @brief Retrieve a sensor value from the worker

        @param sensor_name The name of the sensor to read

        @return (via Return) The sensor value from the last inform's final argument

        @raises WorkerRequestError if the request is not acknowledged
        """
        yield self._client.until_synced()
        response = yield self._client.req.sensor_value(sensor_name)
        if not response.reply.reply_ok():
            raise WorkerRequestError(response.reply.arguments[1])
        raise Return(response.informs[0].arguments[-1])

    def start(self):
        """
        @brief Start the client to the worker server
        """
        log.debug("Starting client to worker at {}:{}".format(
            self.hostname, self.port))
        self._client.start()
        self._started = True

    @coroutine
    def reset(self):
        """
        @brief Issue a reset request to the worker server

        @raises WorkerRequestError if the request is not acknowledged
        """
        yield self._client.until_synced()
        response = yield self._client.req.reset()
        if not response.reply.reply_ok():
            raise WorkerRequestError(response.reply.arguments[1])

    def is_connected(self):
        """
        @brief True if the underlying KATCP client is currently connected
        """
        return self._client.is_connected()

    def __repr__(self):
        return "<{} @ {}:{} (connected = {})>".format(
            self.__class__.__name__, self.hostname, self.port,
            self.is_connected())

    def __hash__(self):
        # This hash override is required to allow these wrappers
        # to be used with set() objects. The implication is that
        # the combination of hostname and port is unique for a
        # worker server
        return hash((self.hostname, self.port))

    def __eq__(self, other):
        # Bug fix: the original compared hash(self) == hash(other), but
        # hash equality does not imply equality — any object whose hash
        # collided with (hostname, port) compared equal to this worker.
        # Compare the identifying attributes directly instead.
        if isinstance(other, WorkerWrapper):
            return (self.hostname, self.port) == (other.hostname, other.port)
        return NotImplemented

    def __ne__(self, other):
        # Python 2 does not derive __ne__ from __eq__, so define it
        # explicitly to keep != consistent with ==.
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

    def __del__(self):
        # Best-effort teardown of the client on garbage collection; errors
        # are logged rather than raised (exceptions in __del__ are ignored).
        if self._started:
            try:
                self._client.stop()
            except Exception as error:
                log.exception(str(error))
class DigitiserPacketiserClient(object):
    """KATCP client wrapper used to control a digitiser/packetiser unit."""

    def __init__(self, host, port=7147):
        """
        Wraps katcp commands to control a digitiser/packetiser.

        Args:
            host:  The host IP or name for the desired packetiser KATCP interface
            port:  The port number for the desired packetiser KATCP interface
        """
        self._host = host
        self._port = port
        self._client = KATCPClientResource(
            dict(name="digpack-client",
                 address=(self._host, self._port),
                 controlled=True))
        self._client.start()
        self._capture_started = False
        # Maps sampling rate [Hz] -> (firmware name, rate label, filter number).
        self._sampling_modes = {
            4096000000: ("virtex7_dk769b", "4.096GHz", 3),
            4000000000: ("virtex7_dk769b", "4.0GHz", 5),
            3600000000: ("virtex7_dk769b", "3.6GHz", 7),
            3520000000: ("virtex7_dk769b", "3.52GHz", 7),
            3500000000: ("virtex7_dk769b", "3.5GHz", 7),
            3200000000: ("virtex7_dk769b", "3.2GHz", 9),
            2600000000: ("virtex7_dk769b", "2.6GHz", 3),
            2560000000: ("virtex7_dk769b", "2.56GHz", 2),
            1750000000: ("virtex7_dk769b_test146.mkt", "3.5GHz", 7)
            # This is a special mode for the meerkat digitial filter cores
            # inside the edd. An effective 1750 Mhz sampling rate / 875MHz
            # bandwidth is achieved by digitial filtering of the 3.5GHz
            # sampled rate!
        }
        # This is quite hacky, and the design of this client has to be
        # improved. Possibly by having a client per firmware.
        self.__firmware = None

    def stop(self):
        """Stop the underlying KATCP client."""
        self._client.stop()

    @coroutine
    def _safe_request(self, request_name, *args):
        """
        Send a request to client and prints response ok / error message.

        Args:
            request_name: Name of the request
            *args:        Arguments passed to the request.

        Raises:
            DigitiserPacketiserError: if the request is not acknowledged.
        """
        _log.info("Sending packetiser request '{}' with arguments {}".format(
            request_name, args))
        yield self._client.until_synced()
        response = yield self._client.req[request_name](*args)
        if not response.reply.reply_ok():
            _log.error("'{}' request failed with error: {}".format(
                request_name, response.reply.arguments[1]))
            raise DigitiserPacketiserError(response.reply.arguments[1])
        else:
            _log.debug("'{}' request successful".format(request_name))
            raise Return(response)

    @coroutine
    def _check_interfaces(self, interfaces=['iface00', 'iface01']):
        """
        Check if interface of digitizer is in error state.

        Raises:
            PacketiserInterfaceError: if an interface's AM lock status
            sensor is non-zero (interface did not boot).
        """
        _log.debug("Checking status of 40 GbE interfaces")
        yield self._client.until_synced()

        @coroutine
        def _check_interface(name):
            _log.debug("Checking status of '{}'".format(name))
            sensor = self._client.sensor[
                'rxs_packetizer_40g_{}_am_lock_status'.format(name)]
            status = yield sensor.get_value()
            if not status == 0x0f:
                _log.warning("Interface '{}' in error state".format(name))
                raise PacketiserInterfaceError(
                    "40-GbE interface '{}' did not boot".format(name))
            else:
                _log.debug("Interface '{}' is healthy".format(name))

        for iface in interfaces:
            yield _check_interface(iface)

    @coroutine
    def set_predecimation(self, factor):
        """
        Set a predecimation factor for the packetizer - for e.g. factor=2
        only every second sample is used.
        """
        allowedFactors = [1, 2, 4, 8, 16]  # Eddy Nussbaum, private communication
        if factor not in allowedFactors:
            raise RuntimeError(
                "predicimation factor {} not in allowed factors {}".format(
                    factor, allowedFactors))
        yield self._safe_request("rxs_packetizer_edd_predecimation", factor)

    @coroutine
    def set_noise_diode_frequency(self, frequency):
        """
        Set noise diode frequency to given value.

        A frequency of 0 disables the noise diode (0% duty cycle);
        otherwise a 50% duty cycle at the given frequency is used.
        """
        if frequency == 0:
            yield self.set_noise_diode_firing_pattern(0.0, 0.0, "now")
        else:
            yield self.set_noise_diode_firing_pattern(0.5, 1. / frequency, "now")

    @coroutine
    def set_noise_diode_firing_pattern(self, percentage, period, start="now"):
        """
        Set noise diode firing pattern.

        Args:
            percentage: Percentage of period which the noise diode is turned on.
            period:     Period of firing [s].
            start:      When the pattern should take effect (default "now").
        """
        _log.debug("Set noise diode firing pattern")
        yield self._safe_request("noise_source", start, percentage, period)

    @coroutine
    def set_sampling_rate(self, rate, retries=3):
        """
        Sets the sampling rate.

        Args:
            rate:    The sampling rate in samples per second
                     (e.g. 2.6 GHz should be passed as 2600000000.0)
            retries: Number of times to retry system reinitialisation if
                     the 40-GbE interfaces fail to come up.

        To allow time for reinitialisation of the packetiser firmware during this
        call we enforce a 10 second sleep before the function returns.

        Raises:
            DigitiserPacketiserError: if the rate is not a supported mode.
            PacketiserInterfaceError: if the interfaces fail after all retries.
            RuntimeError: if the selected firmware core is unknown.
        """
        try:
            args = self._sampling_modes[int(rate)]
        except KeyError:
            pos_freqs = "\n".join(
                ["  - {} Hz ".format(f) for f in self._sampling_modes.keys()])
            error_msg = "Frequency {} Hz not in possible frequencies:\n{}".format(
                rate, pos_freqs)
            _log.error(error_msg)
            raise DigitiserPacketiserError(error_msg)

        attempts = 0
        while True:
            _log.debug("Reinit packetizer with firmware: {}".format(args[0]))
            response = yield self._safe_request("rxs_packetizer_system_reinit", *args)
            self.__firmware = args[0]
            yield sleep(20)
            try:
                _log.warning(
                    "Hard coded firmware names in interface checks. This is a shortterm hack!"
                )
                if args[0] == "virtex7_dk769b":
                    yield self._check_interfaces()
                elif args[0] == "virtex7_dk769b_test146.mkt":
                    yield self._check_interfaces(["iface00"])
                else:
                    # Bug fix: the original constructed this RuntimeError
                    # without raising it, silently skipping the interface
                    # check for unknown firmware cores.
                    raise RuntimeError("Unknown core")
            except PacketiserInterfaceError as error:
                if attempts >= retries:
                    raise error
                else:
                    _log.warning("Retrying system initalisation")
                    attempts += 1
                    continue
            else:
                break

    @coroutine
    def set_digitial_filter(self, filter_number):
        """
        Sets the digital filter number.
        """
        yield self._safe_request("rxs_packetizer_40g_filter_selection_set",
                                 filter_number)

    @coroutine
    def set_bit_width(self, nbits):
        """
        Sets the number of bits per sample out of the packetiser

        Args:
            nbits: The desired number of bits per sample (e.g. 8 or 12)

        Raises:
            DigitiserPacketiserError: if the bit depth is unsupported.
        """
        valid_modes = {8: "edd08", 10: "edd10", 12: "edd12"}
        _log.warning("Firmware switch for bit set mode!")
        if self.__firmware == "virtex7_dk769b_test146.mkt":
            # The filter-core firmware has a fixed bit depth.
            _log.debug("Firmware does not support setting bit rate!")
            return
        try:
            mode = valid_modes[int(nbits)]
        except KeyError:
            msg = "Invalid bit depth, valid bit depths are: {}".format(
                valid_modes.keys())
            _log.error(msg)
            raise DigitiserPacketiserError(msg)
        yield self._safe_request("rxs_packetizer_edd_switchmode", mode)

    @coroutine
    def flip_spectrum(self, flip):
        """
        Flip spectrum flip = True/False to adjust for even/odd nyquist zone
        """
        if flip:
            yield self._safe_request("rxs_packetizer_edd_flipsignalspectrum", "on")
        else:
            yield self._safe_request("rxs_packetizer_edd_flipsignalspectrum", "off")

    @coroutine
    def set_destinations(self, v_dest, h_dest):
        """
        Sets the multicast destinations for data out of the packetiser

        Args:
            v_dest: The vertical polarisation channel destinations
            h_dest: The horizontal polarisation channel destinations

        The destinations should be provided as composite stream definition
        strings, e.g. 225.0.0.152+3:7148 (this defines four multicast groups:
        225.0.0.152, 225.0.0.153, 225.0.0.154 and 225.0.0.155, all using
        port 7148). Currently the packetiser only accepts contiguous IP
        ranges for each set of destinations.
        """
        yield self._safe_request("capture_destination", "v", v_dest)
        yield self._safe_request("capture_destination", "h", h_dest)

    @coroutine
    def set_mac_address(self, intf, mac):
        """
        Sets the mac adresses of the source NICs of the packetiser

        Args:
            intf: The number of the NIC
            mac:  The mac of the NIC
        """
        yield self._safe_request("rxs_packetizer_40g_source_mac_set", intf, mac)

    @coroutine
    def set_predecimation_factor(self, factor):
        """
        Sets the predecimation_factorfor data out of the packetiser

        Args:
            factor: (e.g. 1,2,4,8)

        NOTE(review): duplicates set_predecimation but without the
        allowed-factor validation — consider delegating; confirm callers.
        """
        yield self._safe_request("rxs_packetizer_edd_predecimation", factor)

    @coroutine
    def enable_snapshot(self, time=5):
        """
        Enable the packetiser spectral snapshot with the given integration time.

        NOTE(review): the request is issued twice, once with `time` and once
        without — presumably intentional (set then re-arm); confirm against
        packetiser firmware documentation.
        """
        yield self._safe_request("rxs_packetizer_snapshot_enable_spec", time)
        yield self._safe_request("rxs_packetizer_snapshot_enable_spec")

    @coroutine
    def set_flipsignalspectrum(self, value):
        """
        Sets the rxs-packetizer-edd-flipsignalspectrum data out of the packetiser

        Args:
            value: (e.g. 0, 1)
        """
        yield self._safe_request("rxs_packetizer_edd_flipsignalspectrum", value)

    @coroutine
    def set_interface_address(self, intf, ip):
        """
        Set the interface address for a packetiser qsfp interface

        Args:
            intf: The interface specified as a string integer, e.g. '0' or '1'
            ip:   The IP address to assign to the interface
        """
        yield self._safe_request("rxs_packetizer_40g_source_ip_set", intf, ip)

    @coroutine
    def capture_start(self):
        """
        Start data transmission for both polarisation channels

        This method uses the packetisers 'capture-start' method which is an
        aggregate command that ensures all necessary flags on the packetiser
        and set for data transmission. This includes the 1PPS flag required
        by the ROACH2 boards.
        """
        if not self._capture_started:
            # Only start capture once and not twice if received configure
            self._capture_started = True
            yield self._safe_request("capture_start", "vh")

    @coroutine
    def configure(self, config):
        """
        Applying configuration received in dictionary

        Stops any running capture, applies sampling rate, predecimation,
        spectrum flip, bit width, destinations, optional noise diode and
        interface addresses, synchronises, then starts capture.
        """
        self._capture_started = False
        yield self._safe_request("capture_stop", "vh")
        yield self.set_sampling_rate(config["sampling_rate"])
        yield self.set_predecimation(config["predecimation_factor"])
        yield self.flip_spectrum(config["flip_spectrum"])
        yield self.set_bit_width(config["bit_width"])
        yield self.set_destinations(config["v_destinations"],
                                    config["h_destinations"])
        if "noise_diode_frequency" in config:
            yield self.set_noise_diode_frequency(
                config["noise_diode_frequency"])
        for interface, ip_address in config["interface_addresses"].items():
            yield self.set_interface_address(interface, ip_address)
        if "sync_time" in config:
            yield self.synchronize(config["sync_time"])
        else:
            yield self.synchronize()
        yield self.capture_start()

    @coroutine
    def deconfigure(self):
        """
        Deconfigure. Not doing anything.
        """
        raise Return()

    @coroutine
    def measurement_start(self):
        """No-op for the measurement interface."""
        raise Return()

    @coroutine
    def measurement_stop(self):
        """No-op for the measurement interface."""
        raise Return()

    @coroutine
    def measurement_prepare(self, config=None):
        """
        Optionally update the noise diode settings before a measurement.

        Args:
            config: Optional dict with either 'noise_diode_frequency' or a
                    'noise_diode_pattern' dict ({'percentage', 'period'}).
        """
        # Bug fix: the original used a mutable default argument (config={}).
        if config is None:
            config = {}
        if "noise_diode_frequency" in config:
            yield self.set_noise_diode_frequency(
                config["noise_diode_frequency"])
        elif "noise_diode_pattern" in config:
            c = config["noise_diode_pattern"]
            yield self.set_noise_diode_firing_pattern(c["percentage"],
                                                      c["period"])
        raise Return()

    @coroutine
    def capture_stop(self):
        """
        Stop data transmission for both polarisation channels
        """
        # Deliberately left as a no-op (transmission keeps running).
        _log.warning("Not stopping data transmission")
        raise Return()
        #yield self._safe_request("capture_stop", "vh")

    @coroutine
    def get_sync_time(self):
        """
        Get the current packetiser synchronisation epoch

        Return:
            The synchronisation epoch as a unix time float
        """
        response = yield self._safe_request("rxs_packetizer_40g_get_zero_time")
        sync_epoch = float(response.informs[0].arguments[0])
        raise Return(sync_epoch)

    @coroutine
    def get_snapshot(self):
        """
        Returns dictionary with snapshot data from the packetizer.
        """
        response = yield self._safe_request("rxs_packetizer_snapshot_get_spec")
        res = {}
        for message in response.informs:
            key = message.arguments[0]
            if 'header' in key:
                res[key] = dict(band_width=float(message.arguments[1]) * 1e3,
                                integration_time=float(message.arguments[2]),
                                num_channels=int(message.arguments[3]),
                                band_width_adc=float(message.arguments[4]),
                                spec_counter=int(message.arguments[5]),
                                timestamp=message.arguments[6])
            elif 'adc' in key:
                # NOTE(review): np.fromstring is deprecated in favour of
                # np.frombuffer; left unchanged pending a py3/numpy upgrade.
                res[key] = np.fromstring(message.arguments[1], dtype=np.float32)
            elif 'level' in key:
                res[key] = np.fromstring(message.arguments[1], dtype=np.int32)
        raise Return(res)

    @coroutine
    def synchronize(self, unix_time=None):
        """
        Set the synchronisation epoch for the packetiser

        Args:
            unix_time: The unix time to synchronise at. If no value is
                       provided a resonable value will be selected.

        When explicitly setting the synchronisation time it should be a
        second or two into the future allow enough time for communication
        with the packetiser. If the time is in the past by the time the
        request reaches the packetiser the next 1PPS tick will be selected.
        Users *must* call get_sync_time to get the actual time that was set.
        This call will block until the sync epoch has passed (i.e. if a sync
        epoch is chosen that is 10 second in the future, the call will block
        for 10 seconds).
        """
        if not unix_time:
            unix_time = round(time.time() + 2)
        yield self._safe_request("synchronise", 0, unix_time)
        sync_epoch = yield self.get_sync_time()
        if sync_epoch != unix_time:
            _log.warning(
                "Requested sync time {} not equal to actual sync time {}".
                format(unix_time, sync_epoch))

    @coroutine
    def populate_data_store(self, host, port):
        """
        Populate the data store

        Args:
            host: ip of the data store to use
            port: port of the data store
        """
        _log.debug("Populate data store @ {}:{}".format(host, port))
        dataStore = EDDDataStore(host, port)
        _log.debug("Adding output formats to known data formats")
        descr = {
            "description": "Digitizer/Packetizer spead. One heap per packet.",
            "ip": None,
            "port": None,
            "bit_depth": None,   # Dynamic Parameter
            "sample_rate": None,
            "sync_time": None,
            "samples_per_heap": 4096
        }
        dataStore.addDataFormatDefinition("MPIFR_EDD_Packetizer:1", descr)
        raise Return()
class EddServerProductController(object):
    """
    KATCP client wrapper controlling a single EDD pipeline server instance.

    Forwards the standard product-controller lifecycle calls (configure,
    capture_start/stop, measurement_*, provision, ...) to the remote
    server as KATCP requests.
    """

    def __init__(self, product_id, address, port):
        """
        Interface for pipeline instances using katcp.

        Args:
            product_id: A unique identifier for this product.
            address:    Host name or IP of the product's KATCP server.
            port:       Port of the product's KATCP server.
        """
        log.debug("Installing controller for {} at {}, {}".format(
            product_id, address, port))
        self.ip = address
        self.port = port
        self._client = KATCPClientResource(
            dict(name="server-client_{}".format(product_id),
                 address=(address, int(port)),
                 controlled=True))
        self._product_id = product_id
        self._client.start()

    @coroutine
    def _safe_request(self, request_name, *args, **kwargs):
        """
        Send a KATCP request, raising on transport or reply failure.

        Args:
            request_name: Name of the KATCP request to issue.
            *args/**kwargs: Forwarded to the client request (e.g. timeout).

        Returns:
            The KATCP response object on success.

        Raises:
            RuntimeError: If the server replies with a failure.
        """
        log.debug("Sending request '{}' to {} with arguments {}".format(
            request_name, self._product_id, args))
        try:
            yield self._client.until_synced()
            response = yield self._client.req[request_name](*args, **kwargs)
        except Exception as E:
            log.error("Error processing request: {} in {}".format(
                E, self._product_id))
            # Bare raise preserves the original traceback ('raise E' would
            # truncate it on Python 2).
            raise
        if not response.reply.reply_ok():
            erm = "'{}' request failed in {} with error: {}".format(
                request_name, self._product_id, response.reply.arguments[1])
            log.error(erm)
            raise RuntimeError(erm)
        else:
            log.debug("'{}' request successful".format(request_name))
        raise Return(response)

    @coroutine
    def deconfigure(self):
        """
        Deconfigure the remote product.
        """
        yield self._safe_request('deconfigure', timeout=120.0)

    @coroutine
    def configure(self, config=None):
        """
        Send a JSON-encoded configuration to the remote product.

        Args:
            config: Configuration dict (defaults to empty). Note the
                default is None rather than a shared mutable dict.
        """
        log.debug("Send cfg to {}".format(self._product_id))
        if config is None:
            config = {}
        yield self._safe_request("configure", json.dumps(config),
                                 timeout=120.0)

    @coroutine
    def capture_start(self):
        """
        Start data capture on the remote product.
        """
        yield self._safe_request("capture_start", timeout=120.0)

    @coroutine
    def capture_stop(self):
        """
        Stop data capture on the remote product.
        """
        yield self._safe_request("capture_stop", timeout=120.0)

    @coroutine
    def measurement_prepare(self, config=None):
        """
        Send a JSON-encoded measurement-prepare request to the product.

        Args:
            config: Measurement configuration dict (defaults to empty).
        """
        if config is None:
            config = {}
        yield self._safe_request("measurement_prepare", json.dumps(config),
                                 timeout=120.0)

    @coroutine
    def measurement_start(self):
        """
        Start a measurement on the remote product.
        """
        yield self._safe_request("measurement_start", timeout=60.0)

    @coroutine
    def measurement_stop(self):
        """
        Stop a measurement on the remote product.
        """
        yield self._safe_request("measurement_stop", timeout=60.0)

    @coroutine
    def set(self, config):
        """
        Send a JSON-encoded 'set' request to update product settings.

        Args:
            config: Dict of settings to apply.
        """
        log.debug("Send set to {}".format(self._product_id))
        yield self._safe_request("set", json.dumps(config), timeout=120.0)

    @coroutine
    def provision(self, config):
        """
        Provision the remote product.

        Args:
            config: Provision description, passed through unencoded.
        """
        log.debug("Send provision to {}".format(self._product_id))
        yield self._safe_request("provision", config, timeout=300.0)

    @coroutine
    def deprovision(self):
        """
        Deprovision the remote product.
        """
        log.debug("Send deprovision to {}".format(self._product_id))
        yield self._safe_request("deprovision", timeout=300.0)

    @coroutine
    def getConfig(self):
        """
        Fetch the product's current configuration.

        Returns:
            The JSON-decoded value of the 'current-config' sensor.
        """
        log.debug("Send get config to {}".format(self._product_id))
        R = yield self._safe_request("sensor_value", "current-config",
                                     timeout=3)
        raise Return(json.loads(R.informs[0].arguments[-1]))

    @coroutine
    def ping(self):
        """
        Check that the product is reachable and its identity matches.

        Returns:
            True if the server synced and reported a matching product id,
            False on timeout, id mismatch, or any other error.
        """
        log.debug("Ping product {} at {}:{}.".format(self._product_id,
                                                     self.ip, self.port))
        try:
            yield self._client.until_synced(timeout=2)
            log.debug("product reachable")
            cfg = yield self.getConfig()
            if cfg['id'] != self._product_id:
                log.warning('Product id changed!')
                raise Return(False)
            log.debug("ID match")
        except TimeoutError:
            log.debug("Timeout Reached. Product inactive")
            raise Return(False)
        except Exception as E:
            # Best effort: any failure counts as "not alive".
            log.error("Error during ping: {}".format(E))
            raise Return(False)
        raise Return(True)
class DigitiserPacketiserClient(object):
    """
    KATCP client for controlling an EDD digitiser/packetiser.
    """

    def __init__(self, host, port=7147):
        """
        @brief Class for digitiser packetiser client.

        @param host The host IP or name for the desired packetiser KATCP interface
        @param port The port number for the desired packetiser KATCP interface
        """
        self._host = host
        self._port = port
        self._client = KATCPClientResource(
            dict(name="digpack-client",
                 address=(self._host, self._port),
                 controlled=True))
        self._client.start()

    def stop(self):
        """
        @brief Stop the underlying KATCP client.
        """
        self._client.stop()

    @coroutine
    def _safe_request(self, request_name, *args):
        """
        @brief Send a request, raising DigitiserPacketiserError on failure.

        @param request_name The KATCP request name
        @param args Positional arguments forwarded to the request

        @return The KATCP response object on success
        """
        log.info("Sending packetiser request '{}' with arguments {}".format(
            request_name, args))
        yield self._client.until_synced()
        response = yield self._client.req[request_name](*args)
        if not response.reply.reply_ok():
            log.error("'{}' request failed with error: {}".format(
                request_name, response.reply.arguments[1]))
            raise DigitiserPacketiserError(response.reply.arguments[1])
        else:
            log.debug("'{}' request successful".format(request_name))
            raise Return(response)

    @coroutine
    def _check_interfaces(self):
        """
        @brief Verify that both 40 GbE interfaces report a healthy lock
               status, raising PacketiserInterfaceError otherwise.
        """
        log.debug("Checking status of 40 GbE interfaces")
        yield self._client.until_synced()

        @coroutine
        def _check_interface(name):
            log.debug("Checking status of '{}'".format(name))
            sensor = self._client.sensor[
                'rxs_packetizer_40g_{}_am_lock_status'.format(name)]
            status = yield sensor.get_value()
            # 0x0f means all lock bits set; anything else is an error state.
            if not status == 0x0f:
                log.warning("Interface '{}' in error state".format(name))
                raise PacketiserInterfaceError(
                    "40-GbE interface '{}' did not boot".format(name))
            else:
                log.debug("Interface '{}' is healthy".format(name))
        yield _check_interface('iface00')
        yield _check_interface('iface01')

    @coroutine
    def set_sampling_rate(self, rate, retries=3):
        """
        @brief Sets the sampling rate.

        @param rate The sampling rate in samples per second
                    (e.g. 2.6 GHz should be passed as 2600000000.0)
        @param retries Number of firmware reinitialisation attempts before
                       giving up.

        @detail To allow time for reinitialisation of the packetiser
                firmware during this call we enforce a 10 second sleep
                before the function returns.
        """
        valid_modes = {
            4000000000: ("virtex7_dk769b", "4.0GHz", 5),
            2600000000: ("virtex7_dk769b", "2.6GHz", 3)
        }
        try:
            args = valid_modes[rate]
        except KeyError:
            msg = "Invalid sampling rate, valid sampling rates are: {}".format(
                sorted(valid_modes.keys()))
            log.error(msg)
            raise DigitiserPacketiserError(msg)
        attempts = 0
        while True:
            # Reinitialise the firmware and give it time to come back up.
            yield self._safe_request("rxs_packetizer_system_reinit", *args)
            yield sleep(10)
            try:
                yield self._check_interfaces()
            except PacketiserInterfaceError:
                if attempts >= retries:
                    # Out of retries: propagate the failure with its
                    # original traceback.
                    raise
                log.warning("Retrying system initalisation")
                attempts += 1
            else:
                break

    @coroutine
    def set_bit_width(self, nbits):
        """
        @brief Sets the number of bits per sample out of the packetiser

        @param nbits The desired number of bits per sample (e.g. 8 or 12)
        """
        valid_modes = {8: "edd08", 12: "edd12"}
        try:
            mode = valid_modes[nbits]
        except KeyError:
            msg = "Invalid bit depth, valid bit depths are: {}".format(
                sorted(valid_modes.keys()))
            log.error(msg)
            raise DigitiserPacketiserError(msg)
        yield self._safe_request("rxs_packetizer_edd_switchmode", mode)

    @coroutine
    def set_destinations(self, v_dest, h_dest):
        """
        @brief Sets the multicast destinations for data out of the packetiser

        @param v_dest The vertical polarisation channel destinations
        @param h_dest The horizontal polarisation channel destinations

        @detail The destinations should be provided as composite stream
                definition strings, e.g. 225.0.0.152+3:7148 (this defines
                four multicast groups: 225.0.0.152, 225.0.0.153, 225.0.0.154
                and 225.0.0.155, all using port 7148). Currently the
                packetiser only accepts contiguous IP ranges for each set
                of destinations.
        """
        yield self._safe_request("capture_destination", "v", v_dest)
        yield self._safe_request("capture_destination", "h", h_dest)

    @coroutine
    def set_interface_address(self, intf, ip):
        """
        @brief Set the interface address for a packetiser qsfp interface

        @param intf The interface specified as a string integer, e.g.
                    '0' or '1'
        @param ip The IP address to assign to the interface
        """
        yield self._safe_request("rxs_packetizer_40g_source_ip_set", intf, ip)

    @coroutine
    def capture_start(self):
        """
        @brief Start data transmission for both polarisation channels

        @detail This method uses the packetisers 'capture-start' method
                which is an aggregate command that ensures all necessary
                flags on the packetiser and set for data transmission.
                This includes the 1PPS flag required by the ROACH2 boards.
        """
        yield self._safe_request("capture_start", "vh")

    @coroutine
    def capture_stop(self):
        """
        @brief Stop data transmission for both polarisation channels
        """
        yield self._safe_request("capture_stop", "vh")

    @coroutine
    def get_sync_time(self):
        """
        @brief Get the current packetiser synchronisation epoch

        @return The synchronisation epoch as a unix time float
        """
        response = yield self._safe_request("rxs_packetizer_40g_get_zero_time")
        sync_epoch = float(response.informs[0].arguments[0])
        raise Return(sync_epoch)

    @coroutine
    def synchronize(self, unix_time=None):
        """
        @brief Set the synchronisation epoch for the packetiser

        @param unix_time The unix time to synchronise at. If no value is
                         provided a reasonable value will be selected.

        @detail When explicitly setting the synchronisation time it should
                be a second or two into the future allow enough time for
                communication with the packetiser. If the time is in the
                past by the time the request reaches the packetiser the
                next 1PPS tick will be selected. Users *must* call
                get_sync_time to get the actual time that was set. This
                call will block until the sync epoch has passed (i.e. if a
                sync epoch is chosen that is 10 second in the future, the
                call will block for 10 seconds).

        @note The packetiser rounds to the nearest 1 PPS tick so it is
              recommended to set the epoch on a whole-second boundary.
        """
        if not unix_time:
            unix_time = round(time.time() + 2)
        yield self._safe_request("synchronise", 0, unix_time)
        # Read back the epoch actually selected by the packetiser.
        sync_epoch = yield self.get_sync_time()
        if sync_epoch != unix_time:
            log.warning(
                "Requested sync time {} not equal to actual sync time {}".
                format(unix_time, sync_epoch))
class KATCPToIGUIConverter(object):
    """
    Mirrors sensors of a KATCP server into iGUI tasks, creating tasks on
    demand and pushing sensor updates to the iGUI server.
    """

    def __init__(self, host, port, igui_host, igui_user, igui_pass,
                 igui_device_id):
        """
        @brief Class for katcp to igui converter.

        @param host KATCP host address
        @param port KATCP port number
        @param igui_host iGUI server hostname
        @param igui_user iGUI username
        @param igui_pass iGUI password
        @param igui_device_id iGUI device ID
        """
        self.rc = KATCPClientResource(
            dict(name="test-client", address=(host, port), controlled=True))
        self.host = host
        self.port = port
        self.igui_host = igui_host
        self.igui_user = igui_user
        self.igui_pass = igui_pass
        self.igui_group_id = None
        self.igui_device_id = igui_device_id
        self.igui_connection = IGUIConnection(self.igui_host, self.igui_user,
                                              self.igui_pass)
        self.igui_task_id = None
        self.igui_rxmap = None
        self.ioloop = None
        self.ic = None
        self.api_version = None
        self.implementation_version = None
        self.previous_sensors = set()

    def start(self):
        """
        @brief Start the instance running

        @detail This call will trigger connection of the KATCPResource
                client and will login to the iGUI server. Once both
                connections are established the instance will retrieve a
                mapping of the iGUI receivers, devices and tasks and will
                try to identify the parent of the device_id provided in
                the constructor.

        @param self The object
        @return { description_of_the_return_value }
        """
        @tornado.gen.coroutine
        def _start():
            log.debug("Waiting on synchronisation with server")
            yield self.rc.until_synced()
            log.debug("Client synced")
            log.debug("Requesting version info")
            # This information can be used to get an iGUI device ID
            response = yield self.rc.req.version_list()
            log.info("response {}".format(response))
            # for internal device KATCP server, response.informs[2].arguments return index out of range
            #_, api, implementation = response.informs[2].arguments
            #self.api_version = api
            #self.implementation_version = implementation
            #log.info("katcp-device API: {}".format(self.api_version))
            #log.info("katcp-device implementation: {}".format(self.implementation_version))
            # Kick off the initial sensor synchronisation.
            self.ioloop.add_callback(self.update)
        log.debug("Starting {} instance".format(self.__class__.__name__))
        # self.igui_connection.login()
        #self.igui_connection.login(self.igui_user, self.igui_pass)
        self.igui_rxmap = self.igui_connection.build_igui_representation()
        #log.debug(self.igui_rxmap)
        # Here we do a look up to find the parent of this device
        for rx in self.igui_rxmap:
            log.debug(rx.id)
            if self.igui_device_id in rx.devices._by_id.keys():
                log.debug(self.igui_device_id)
                log.debug(rx.id)
                self.igui_rx_id = rx.id
                log.debug("Found Rx parent: {}".format(self.igui_rx_id))
                break
        else:
            # for/else: no receiver contained the device — fail loudly.
            log.debug("Device '{}' is not a child of any receiver".format(
                self.igui_device_id))
            raise IGUIMappingException(
                "Device '{}' is not a child of any receiver".format(
                    self.igui_device_id))
        #log.debug("iGUI representation:\n{}".format(self.igui_rxmap))
        self.rc.start()
        self.ic = self.rc._inspecting_client
        self.ioloop = self.rc.ioloop
        # Re-sync the sensor list whenever the server's interface changes.
        self.ic.katcp_client.hook_inform(
            "interface-changed",
            lambda message: self.ioloop.add_callback(self.update))
        self.ioloop.add_callback(_start)

    @tornado.gen.coroutine
    def update(self):
        """
        @brief Synchronise with the KATCP servers sensors and register new
               listners
        """
        log.debug("Waiting on synchronisation with server")
        yield self.rc.until_synced()
        log.debug("Client synced")
        current_sensors = set(self.rc.sensor.keys())
        log.debug("Current sensor set: {}".format(current_sensors))
        removed = self.previous_sensors.difference(current_sensors)
        log.debug("Sensors removed since last update: {}".format(removed))
        added = current_sensors.difference(self.previous_sensors)
        log.debug("Sensors added since last update: {}".format(added))
        for name in list(added):
            log.debug("Setting sampling strategy and callbacks on sensor '{}'".
                      format(name))
            # strat3 = ('event-rate', 2.0, 3.0) #event-rate doesn't work
            # self.rc.set_sampling_strategy(name, strat3) #KATCPSensorError:
            # Error setting strategy
            # not sure that auto means here
            self.rc.set_sampling_strategy(name, "auto")
            #self.rc.set_sampling_strategy(name, ["period", (10)])
            #self.rc.set_sampling_strategy(name, "event")
            self.rc.set_sensor_listener(name, self._sensor_updated)
        self.previous_sensors = current_sensors

    def _sensor_updated(self, sensor, reading):
        """
        @brief Callback to be executed on a sensor being updated

        @param sensor The sensor
        @param reading The sensor reading
        """
        log.debug("Recieved sensor update for sensor '{}': {}".format(
            sensor.name, repr(reading)))
        try:
            rx = self.igui_rxmap.by_id(self.igui_rx_id)
        except KeyError:
            raise Exception("No iGUI receiver with ID {}".format(
                self.igui_rx_id))
        try:
            device = rx.devices.by_id(self.igui_device_id)
        except KeyError:
            raise Exception("No iGUI device with ID {}".format(
                self.igui_device_id))
        try:
            #self.igui_rxmap = self.igui_connection.build_igui_representation()
            #device = self.igui_rxmap.by_id(self.igui_rx_id).devices.by_id(self.igui_device_id)
            task = device.tasks.by_name(sensor.name)
        except KeyError:
            # Task does not exist yet in iGUI: create it, grant privileges,
            # then rebuild the representation to pick up the new task.
            if (sensor.name[-3:] == 'PNG'):
                task = json.loads(
                    self.igui_connection.create_task(
                        device, (sensor.name, "NONE", "", "IMAGE", "GET_SET",
                                 "0", "0", "0", "-10000000000000000",
                                 "10000000000000000", "300")))
            else:
                task = json.loads(
                    self.igui_connection.create_task(
                        device, (sensor.name, "NONE", "", "GETSET", "GET",
                                 "0", "0", "0", "-10000000000000000",
                                 "10000000000000000", "300")))
            self.igui_task_id = str(task[0]['rx_task_id'])
            self.igui_connection.update_group_task_privileges(
                [self.igui_connection.igui_group_id, self.igui_task_id], "Y")
            self.igui_connection.update_group_task_privileges([
                self.igui_connection.igui_group_id, self.igui_task_id, "update"
            ], "Y")
            self.igui_rxmap = self.igui_connection.build_igui_representation()
            device = self.igui_rxmap.by_id(self.igui_rx_id).devices.by_id(
                self.igui_device_id)
            task = device.tasks.by_id(self.igui_task_id)
        if (sensor.name[-3:] == 'PNG'
            ):  # or some image type that we finally agreed on
            log.debug(sensor.name)
            log.debug(sensor.value)
            log.debug(len(sensor.value))
            # NOTE(review): blob path uses reading.value while the value
            # path below uses sensor.value — confirm this asymmetry is
            # intentional.
            self.igui_connection.set_task_blob(task, reading.value)
        else:
            self.igui_connection.set_task_value(task, sensor.value)

    def stop(self):
        """
        @brief Stop the client
        """
        self.rc.stop()