Example #1
    def __init__(self, host, port=7147):
        """
        Wraps katcp commands to control a digitiser/packetiser.

        Args:
            host:  The host IP or name for the desired packetiser KATCP interface
            port:  The port number for the desired packetiser KATCP interface
        """
        self._host = host
        self._port = port
        self._client = KATCPClientResource(
            dict(name="digpack-client",
                 address=(self._host, self._port),
                 controlled=True))
        self._client.start()
        self._capture_started = False

        self._sampling_modes = {
            4096000000: ("virtex7_dk769b", "4.096GHz", 3),
            4000000000: ("virtex7_dk769b", "4.0GHz", 5),
            3600000000: ("virtex7_dk769b", "3.6GHz", 7),
            3520000000: ("virtex7_dk769b", "3.52GHz", 7),
            3500000000: ("virtex7_dk769b", "3.5GHz", 7),
            3200000000: ("virtex7_dk769b", "3.2GHz", 9),
            2600000000: ("virtex7_dk769b", "2.6GHz", 3),
            2560000000: ("virtex7_dk769b", "2.56GHz", 2),
            1750000000: (
                "virtex7_dk769b_test146.mkt", "3.5GHz", 7
            )  # Special mode for the MeerKAT digital filter cores inside the EDD.
            # An effective 1750 MHz sampling rate (875 MHz bandwidth) is
            # achieved by digital filtering of the 3.5 GHz sampled data.
        }  # This is quite hacky, and the design of this client has to be
        # improved, possibly by having a client per firmware.
        self.__firmware = None
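
The sampling-mode table above maps an integer sampling rate in Hz to a
(firmware, rate-label, predecimation-factor) tuple. A minimal sketch of a
lookup helper built on that table (this helper is hypothetical, not part of
the class above):

    def lookup_sampling_mode(self, rate):
        """Return the (firmware, mode_label, decimation) tuple for a rate in Hz."""
        try:
            return self._sampling_modes[rate]
        except KeyError:
            valid = ", ".join(str(r) for r in sorted(self._sampling_modes))
            raise ValueError(
                "Unsupported sampling rate {} Hz; supported rates are: {}".format(
                    rate, valid))
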
Example #2
    def __init__(self, host, port, igui_host, igui_user, igui_pass,
                 igui_device_id):
        """
        @brief      Class for katcp to igui converter.

        @param   host             KATCP host address
        @param   port             KATCP port number
        @param   igui_host        iGUI server hostname
        @param   igui_user        iGUI username
        @param   igui_pass        iGUI password
        @param   igui_device_id   iGUI device ID
        """
        self.rc = KATCPClientResource(
            dict(name="test-client", address=(host, port), controlled=True))
        self.host = host
        self.port = port
        self.igui_host = igui_host
        self.igui_user = igui_user
        self.igui_pass = igui_pass
        self.igui_group_id = None
        self.igui_device_id = igui_device_id
        self.igui_connection = IGUIConnection(self.igui_host, self.igui_user,
                                              self.igui_pass)
        self.igui_task_id = None
        self.igui_rxmap = None
        self.ioloop = None
        self.ic = None
        self.api_version = None
        self.implementation_version = None
        self.previous_sensors = set()
Example #3
    def set_configuration_authority(self, hostname, port):
        if self._ca_client:
            self._ca_client.stop()
        self._ca_client = KATCPClientResource(dict(
            name = 'configuration-authority-client',
            address = (hostname, port),
            controlled = True))
        self._ca_client.start()
        self._ca_address_sensor.set_value("{}:{}".format(hostname, port))
Example #4
class EddFitsInterfaceClient(object):
    """
    Wrapper class for a KATCP client to an EddFitsInterfaceServer
    """
    def __init__(self, name, address):
        """
        @brief      Construct new instance

        @param      name     The name used for the logger and KATCP client
        @param      address  The (host, port) address of the EddFitsInterfaceServer
        """
        self.log = logging.getLogger("mpikat.edd_fi.{}".format(name))
        self._fits_interface_client = KATCPClientResource(
            dict(name="fits-interface-client",
                 address=address,
                 controlled=True))
        self._fits_interface_client.start()

    @coroutine
    def _request_helper(self, name, *args, **kwargs):
        if kwargs.pop("presync", None):
            yield self._fits_interface_client.until_synced(2)
        response = yield self._fits_interface_client.req[name](*args)
        if not response.reply.reply_ok():
            self.log.error("Error on {} request: {}".format(
                name, response.reply.arguments[1]))
            raise EddFitsInterfaceClientError(response.reply.arguments[1])

    @coroutine
    def configure(self, config):
        """
        @brief      Configure the attached FITS writer interface

        @param      config  A dictionary containing configuration information.
        """
        yield self._fits_interface_client.until_synced(2)
        nbeams = config["nbeams"]
        nchans = config["nchans"]
        integration_time = config["integration_time"]
        blank_phases = config["blank_phases"]
        yield self._request_helper("configure", nbeams, nchans,
                                   integration_time, blank_phases)

    @coroutine
    def capture_start(self):
        """
        @brief      Start the FITS interface capturing data
        """
        yield self._request_helper("start")

    @coroutine
    def capture_stop(self):
        """
        @brief      Stop the FITS interface from capturing data
        """
        yield self._request_helper("stop")
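
Because configure, capture_start and capture_stop are tornado coroutines, they
must be driven from an IOLoop. A minimal usage sketch, assuming an
EddFitsInterfaceServer is listening at the (hypothetical) address below:

    from tornado.gen import coroutine
    from tornado.ioloop import IOLoop

    @coroutine
    def run_capture():
        # Hypothetical server address and configuration values
        client = EddFitsInterfaceClient("fi0", ("127.0.0.1", 5000))
        yield client.configure({"nbeams": 1, "nchans": 4096,
                                "integration_time": 0.1, "blank_phases": 1})
        yield client.capture_start()
        # ... capture runs until stopped ...
        yield client.capture_stop()

    IOLoop.current().run_sync(run_capture)
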
Example #5
    def setUp(self):
        super(TestDelayBufferController, self).setUp()
        self._beam_manager = BeamManager(32, KATPOINT_ANTENNAS)
        self._delay_config_server = DelayConfigurationServer(
            "127.0.0.1", 0, self._beam_manager)
        self._delay_config_server.start()
        self._delay_client = KATCPClientResource(dict(
            name="delay-configuration-client",
            address=self._delay_config_server.bind_address,
            controlled=True))
        self._delay_client.start()
Example #6
    def __init__(self, name, address):
        """
        @brief      Construct new instance

        @param      name     The name used for the logger and KATCP client
        @param      address  The (host, port) address of the FITS interface server
        """
        self.log = logging.getLogger("mpikat.edd_fi.{}".format(name))
        self._fits_interface_client = KATCPClientResource(
            dict(name="fits-interface-client",
                 address=address,
                 controlled=True))
        self._fits_interface_client.start()
Example #7
    def __init__(self, host, port):
        """
        Constructs a new instance.

        :param      host:  The address of the server to sidecar
        :param      port:  The server port
        """
        log.debug("Constructing sidecar for {}:{}".format(host, port))
        self.rc = KATCPClientResource(
            dict(name="sidecar-client", address=(host, port), controlled=True))
        self._update_callbacks = set()
        self._previous_sensors = set()
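
The constructor above only prepares state; the _update_callbacks set suggests
that the full class registers callables to be fired on sensor updates. A
hypothetical sketch of such a registration pair (not shown in the excerpt):

    def add_update_callback(self, callback):
        # Hypothetical: register a callable invoked on every sensor update
        self._update_callbacks.add(callback)

    def remove_update_callback(self, callback):
        # Hypothetical: deregister a previously added callable
        self._update_callbacks.discard(callback)
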
Example #8
class TestDelayBufferController(AsyncTestCase):
    def setUp(self):
        super(TestDelayBufferController, self).setUp()
        self._beam_manager = BeamManager(32, KATPOINT_ANTENNAS)
        self._delay_config_server = DelayConfigurationServer(
            "127.0.0.1", 0, self._beam_manager)
        self._delay_config_server.start()
        self._delay_client = KATCPClientResource(dict(
            name="delay-configuration-client",
            address=self._delay_config_server.bind_address,
            controlled=True))
        self._delay_client.start()

    def tearDown(self):
        super(TestDelayBufferController, self).tearDown()
        self._delay_config_server.stop()

    @gen_test(timeout=20)
    def test_online_mode(self):
        beam_ids = ["cfbf{:05d}".format(i) for i in range(32)]
        for beam_id in beam_ids:
            self._beam_manager.add_beam(
                Target('{},radec,12:00:00,01:00:00'.format(beam_id)))
        antenna_ids = ["m{:03d}".format(i) for i in range(7, 7+4)]
        controller = DelayBufferController(
            self._delay_client, beam_ids, antenna_ids, 1, offline=False)
        yield controller.start()
        yield sleep(5)
        controller.stop()

    @gen_test(timeout=20)
    def test_offline_mode(self):
        def update_delay_via_socket():
            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            sock.connect(CONTROL_SOCKET_ADDR)
            sock.sendall(struct.pack("d", 1554051922.649372))
            response = struct.unpack("b", sock.recv(1))[0]
            if response == 0:
                self.fail("Could not update delays")

        beam_ids = ["cfbf{:05d}".format(i) for i in range(32)]
        for beam_id in beam_ids:
            self._beam_manager.add_beam(
                Target('{},radec,12:00:00,01:00:00'.format(beam_id)))
        antenna_ids = ["m{:03d}".format(i) for i in range(7, 7+4)]
        controller = DelayBufferController(
            self._delay_client, beam_ids, antenna_ids, 1, offline=True)
        yield controller.start()
        update_delay_via_socket()
        controller.stop()
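
The sleep yielded in test_online_mode is a non-blocking, coroutine-friendly
delay. A minimal sketch of such a helper, assuming tornado (the tests likely
import an equivalent from a utility module):

    from tornado.concurrent import Future
    from tornado.ioloop import IOLoop

    def sleep(duration):
        """Return a Future that resolves after `duration` seconds."""
        future = Future()
        IOLoop.current().call_later(
            duration, lambda: future.set_result(None))
        return future
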
Example #9
    def __init__(self, host, port=7147):
        """
        @brief      Class for digitiser packetiser client.

        @param      host   The host IP or name for the desired packetiser KATCP interface
        @param      port   The port number for the desired packetiser KATCP interface
        """
        self._host = host
        self._port = port
        self._client = KATCPClientResource(
            dict(name="digpack-client",
                 address=(self._host, self._port),
                 controlled=True))
        self._client.start()
Example #10
class WorkerWrapper(object):
    """Wrapper around a client to an FbfWorkerServer
    instance.
    """
    def __init__(self, hostname, port):
        """
        @brief  Create a new wrapper around a client to a worker server

        @param hostname The hostname for the worker server
        @param port     The port number that the worker server serves on
        """
        log.debug("Creating worker client to worker at {}:{}".format(hostname, port))
        self._client = KATCPClientResource(dict(
            name="worker-server-client",
            address=(hostname, port),
            controlled=True))
        self.hostname = hostname
        self.port = port
        self.priority = 0 # Currently no priority mechanism is implemented
        self._started = False

    def start(self):
        """
        @brief  Start the client to the worker server
        """
        log.debug("Starting client to worker at {}:{}".format(self.hostname, self.port))
        self._client.start()
        self._started = True

    def __repr__(self):
        return "<{} @ {}:{}>".format(self.__class__.__name__, self.hostname, self.port)

    def __hash__(self):
        # This hash override is required to allow these wrappers
        # to be used with set() objects. The implication is that
        # the combination of hostname and port is unique for a
        # worker server
        return hash((self.hostname, self.port))

    def __eq__(self, other):
        # Also implemented to help with hashing
        # for sets
        return self.__hash__() == hash(other)

    def __del__(self):
        if self._started:
            try:
                self._client.stop()
            except Exception as error:
                log.exception(str(error))
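
The __hash__ and __eq__ overrides make wrappers compare by (hostname, port),
so duplicate registrations of the same worker collapse naturally in a set. A
brief illustration (addresses hypothetical):

    workers = set()
    workers.add(WorkerWrapper("worker0.example", 5100))
    workers.add(WorkerWrapper("worker0.example", 5100))  # same address: deduplicated
    workers.add(WorkerWrapper("worker1.example", 5100))
    assert len(workers) == 2
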
Example #11
    def __init__(self, parent, product_id, r2rm_addr):
        """
        Args:
          parent:            The parent EddRoach2MasterController instance
          product_id:        A unique identifier for this product
          r2rm_addr:         The address of the R2RM (ROACH2 resource manager) to be
                             used by this product. Passed in tuple format,
                             e.g. ("127.0.0.1", 5000)
        """
        super(EddRoach2ProductController, self).__init__(parent, product_id)
        self._r2rm_client = KATCPClientResource(
            dict(name="r2rm-client", address=r2rm_addr, controlled=True))
        self._r2rm_client.start()
        self._firmware = None
        self._icom_id = None
Example #12
    def __init__(self, hostname, port):
        """
        @brief  Create a new wrapper around a client to a worker server

        @param hostname The hostname for the worker server
        @param port     The port number that the worker server serves on
        """
        log.debug("Creating worker client to worker at {}:{}".format(hostname, port))
        self._client = KATCPClientResource(dict(
            name="worker-server-client",
            address=(hostname, port),
            controlled=True))
        self.hostname = hostname
        self.port = port
        self.priority = 0 # Currently no priority mechanism is implemented
        self._started = False
Example #13
    def __init__(self, product_id, address, port):
        """
        Interface for pipeline instances using katcp.

        Args:
            product_id:        A unique identifier for this product
            address:           The host address of the pipeline's KATCP interface
            port:              The port number of the pipeline's KATCP interface
        """
        log.debug("Installing controller for {} at {}, {}".format(
            product_id, address, port))
        self.ip = address
        self.port = port
        self._client = KATCPClientResource(
            dict(name="server-client_{}".format(product_id),
                 address=(address, int(port)),
                 controlled=True))

        self._product_id = product_id
        self._client.start()
Example #14
@coroutine
def setup_32beam_4ant(worker_addr, dc_addr):

    # Hardcoded numbers
    nbeams = 32
    tot_nchans = 4096
    feng_groups = "spead://239.8.0.0+3:7148"
    chan0_idx = 0
    chan0_freq = 1240e6
    chan_bw = 856e6 / tot_nchans

    dc_client = KATCPClientResource(
        dict(name="delay-configuration-client",
             address=dc_addr,
             controlled=True))
    yield dc_client.start()
    print "Syncing delay client"
    yield dc_client.until_synced(timeout=4.0)
    print "Synced"
    antennas_json = yield dc_client.sensor['antennas'].get_value()
    print "done"
    antennas = json.loads(antennas_json)
    print antennas
    worker_client = KATCPClientResource(
        dict(name="worker-server-client", address=worker_addr,
             controlled=True))
    yield worker_client.start()
    print "Syncing worker server"
    yield worker_client.until_synced(timeout=4.0)
    print "done"

    coherent_beams_csv = ",".join(
        ["cfbf{:05d}".format(ii) for ii in range(nbeams)])
    feng_antenna_map = {antenna: ii for ii, antenna in enumerate(antennas)}
    coherent_beam_antennas = antennas
    incoherent_beam_antennas = antennas
    nantennas = len(antennas)
    nchans_per_group = tot_nchans / nantennas / 4
    mcast_to_beam_map = {
        "spead://239.11.1.0:7148": coherent_beams_csv,
        "spead://239.11.1.150:7148": "ifbf00001"
    }
    feng_config = {
        "bandwidth": 856e6,
        "centre-frequency": 1200e6,
        "sideband": "upper",
        "feng-antenna-map": feng_antenna_map,
        "sync-epoch": 12353524243.0,
        "nchans": 4096
    }
    coherent_beam_config = {
        "tscrunch": 16,
        "fscrunch": 1,
        "antennas": ",".join(coherent_beam_antennas)
    }
    incoherent_beam_config = {
        "tscrunch": 16,
        "fscrunch": 1,
        "antennas": ",".join(incoherent_beam_antennas)
    }

    print "Making prepare request"
    response = yield worker_client.req.prepare(
        feng_groups,
        nchans_per_group,
        chan0_idx,
        chan0_freq,
        chan_bw,
        nbeams,
        json.dumps(mcast_to_beam_map),
        json.dumps(feng_config),
        json.dumps(coherent_beam_config),
        json.dumps(incoherent_beam_config),
        *dc_addr,
        timeout=300.0)
    if not response.reply.reply_ok():
        raise Exception("Error on prepare: {}".format(
            response.reply.arguments))
    else:
        print "prepare done"
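
setup_32beam_4ant is a tornado coroutine (its body yields on KATCP futures),
so it has to be driven by an IOLoop. A minimal driver sketch, with
hypothetical server addresses:

    from tornado.ioloop import IOLoop

    worker_addr = ("127.0.0.1", 5555)  # hypothetical FbfWorkerServer address
    dc_addr = ("127.0.0.1", 5000)      # hypothetical DelayConfigurationServer address
    IOLoop.current().run_sync(
        lambda: setup_32beam_4ant(worker_addr, dc_addr))
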
Example #15
class KATCPToIGUIConverter(object):
    def __init__(self, host, port, igui_host, igui_user, igui_pass,
                 igui_device_id):
        """
        @brief      Class for katcp to igui converter.

        @param   host             KATCP host address
        @param   port             KATCP port number
        @param   igui_host        iGUI server hostname
        @param   igui_user        iGUI username
        @param   igui_pass        iGUI password
        @param   igui_device_id   iGUI device ID
        """
        self.rc = KATCPClientResource(
            dict(name="test-client", address=(host, port), controlled=True))
        self.host = host
        self.port = port
        self.igui_host = igui_host
        self.igui_user = igui_user
        self.igui_pass = igui_pass
        self.igui_group_id = None
        self.igui_device_id = igui_device_id
        self.igui_connection = IGUIConnection(self.igui_host, self.igui_user,
                                              self.igui_pass)
        self.igui_task_id = None
        self.igui_rxmap = None
        self.ioloop = None
        self.ic = None
        self.api_version = None
        self.implementation_version = None
        self.previous_sensors = set()

    def start(self):
        """
        @brief      Start the instance running

        @detail     This call will trigger connection of the KATCPResource client and
                    will log in to the iGUI server. Once both connections are established
                    the instance will retrieve a mapping of the iGUI receivers, devices
                    and tasks and will try to identify the parent of the device_id
                    provided in the constructor.
        """
        @tornado.gen.coroutine
        def _start():
            log.debug("Waiting on synchronisation with server")
            yield self.rc.until_synced()
            log.debug("Client synced")
            log.debug("Requesting version info")
            # This information can be used to get an iGUI device ID
            response = yield self.rc.req.version_list()
            log.info("response {}".format(response))
            # For an internal device KATCP server, response.informs[2].arguments
            # raises an IndexError, so version parsing is disabled below:
            #_, api, implementation = response.informs[2].arguments
            #self.api_version = api
            #self.implementation_version = implementation
            #log.info("katcp-device API: {}".format(self.api_version))
            #log.info("katcp-device implementation: {}".format(self.implementation_version))
            self.ioloop.add_callback(self.update)

        log.debug("Starting {} instance".format(self.__class__.__name__))
        # self.igui_connection.login()
        #self.igui_connection.login(self.igui_user, self.igui_pass)
        self.igui_rxmap = self.igui_connection.build_igui_representation()
        #log.debug(self.igui_rxmap)
        # Here we do a look up to find the parent of this device
        for rx in self.igui_rxmap:
            log.debug(rx.id)
            if self.igui_device_id in rx.devices._by_id.keys():
                log.debug(self.igui_device_id)
                log.debug(rx.id)
                self.igui_rx_id = rx.id
                log.debug("Found Rx parent: {}".format(self.igui_rx_id))
                break
        else:
            log.debug("Device '{}' is not a child of any receiver".format(
                self.igui_device_id))
            raise IGUIMappingException(
                "Device '{}' is not a child of any receiver".format(
                    self.igui_device_id))

        #log.debug("iGUI representation:\n{}".format(self.igui_rxmap))
        self.rc.start()
        self.ic = self.rc._inspecting_client
        self.ioloop = self.rc.ioloop
        self.ic.katcp_client.hook_inform(
            "interface-changed",
            lambda message: self.ioloop.add_callback(self.update))
        self.ioloop.add_callback(_start)

    @tornado.gen.coroutine
    def update(self):
        """
        @brief    Synchronise with the KATCP server's sensors and register new listeners
        """
        log.debug("Waiting on synchronisation with server")
        yield self.rc.until_synced()
        log.debug("Client synced")
        current_sensors = set(self.rc.sensor.keys())
        log.debug("Current sensor set: {}".format(current_sensors))
        removed = self.previous_sensors.difference(current_sensors)
        log.debug("Sensors removed since last update: {}".format(removed))
        added = current_sensors.difference(self.previous_sensors)
        log.debug("Sensors added since last update: {}".format(added))
        for name in list(added):
            log.debug("Setting sampling strategy and callbacks on sensor '{}'".
                      format(name))
            # strat3 = ('event-rate', 2.0, 3.0)            # event-rate doesn't work
            # self.rc.set_sampling_strategy(name, strat3)  # raises KATCPSensorError:
            #                                              # Error setting strategy
            # Not sure what 'auto' means here:
            self.rc.set_sampling_strategy(name, "auto")
            #self.rc.set_sampling_strategy(name, ["period", (10)])
            #self.rc.set_sampling_strategy(name, "event")
            self.rc.set_sensor_listener(name, self._sensor_updated)
        self.previous_sensors = current_sensors

    def _sensor_updated(self, sensor, reading):
        """
        @brief      Callback to be executed on a sensor being updated

        @param      sensor   The sensor
        @param      reading  The sensor reading
        """
        log.debug("Received sensor update for sensor '{}': {}".format(
            sensor.name, repr(reading)))
        try:
            rx = self.igui_rxmap.by_id(self.igui_rx_id)
        except KeyError:
            raise Exception("No iGUI receiver with ID {}".format(
                self.igui_rx_id))
        try:
            device = rx.devices.by_id(self.igui_device_id)
        except KeyError:
            raise Exception("No iGUI device with ID {}".format(
                self.igui_device_id))
        try:
            #self.igui_rxmap = self.igui_connection.build_igui_representation()
            #device = self.igui_rxmap.by_id(self.igui_rx_id).devices.by_id(self.igui_device_id)
            task = device.tasks.by_name(sensor.name)
        except KeyError:
            if (sensor.name[-3:] == 'PNG'):
                task = json.loads(
                    self.igui_connection.create_task(
                        device, (sensor.name, "NONE", "", "IMAGE", "GET_SET",
                                 "0", "0", "0", "-10000000000000000",
                                 "10000000000000000", "300")))
            else:
                task = json.loads(
                    self.igui_connection.create_task(
                        device, (sensor.name, "NONE", "", "GETSET", "GET", "0",
                                 "0", "0", "-10000000000000000",
                                 "10000000000000000", "300")))
            self.igui_task_id = str(task[0]['rx_task_id'])
            self.igui_connection.update_group_task_privileges(
                [self.igui_connection.igui_group_id, self.igui_task_id], "Y")
            self.igui_connection.update_group_task_privileges([
                self.igui_connection.igui_group_id, self.igui_task_id, "update"
            ], "Y")
            self.igui_rxmap = self.igui_connection.build_igui_representation()
            device = self.igui_rxmap.by_id(self.igui_rx_id).devices.by_id(
                self.igui_device_id)
            task = device.tasks.by_id(self.igui_task_id)

        if sensor.name[-3:] == 'PNG':  # or some image type that we finally agreed on
            log.debug(sensor.name)
            log.debug(sensor.value)
            log.debug(len(sensor.value))
            self.igui_connection.set_task_blob(task, reading.value)
        else:
            self.igui_connection.set_task_value(task, sensor.value)

    def stop(self):
        """
        @brief      Stop the client
        """
        self.rc.stop()
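
A minimal sketch of wiring the converter into a tornado IOLoop (host names and
credentials are hypothetical):

    from tornado.ioloop import IOLoop

    converter = KATCPToIGUIConverter("localhost", 5000,
                                     "igui.example.com", "user", "secret",
                                     "device-id")
    ioloop = IOLoop.current()
    # start() attaches its own callbacks to the client's IOLoop
    ioloop.add_callback(converter.start)
    ioloop.start()
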
Example #16
class EddRoach2ProductController(ProductController):
    """
    Wrapper class for an EDD ROACH2 product.
    """
    def __init__(self, parent, product_id, r2rm_addr):
        """
        Args:
          parent:            The parent EddRoach2MasterController instance
          product_id:        A unique identifier for this product
          r2rm_addr:         The address of the R2RM (ROACH2 resource manager) to be
                                      used by this product. Passed in tuple format,
                                      e.g. ("127.0.0.1", 5000)
        """
        super(EddRoach2ProductController, self).__init__(parent, product_id)
        self._r2rm_client = KATCPClientResource(
            dict(name="r2rm-client", address=r2rm_addr, controlled=True))
        self._r2rm_client.start()
        self._firmware = None
        self._icom_id = None

    def setup_sensors(self):
        """
        Setup the default KATCP sensors.

        Note:
            As this call is made only upon an EDD product configure call a mass
            inform is required to let connected clients know that the proxy
            interface has changed.
        """
        super(EddRoach2ProductController, self).setup_sensors()
        self._firmware_server_sensor = Sensor.string(
            "firmware-server",
            description=
            "The address of the firmware server started by this product",
            default="",
            initial_status=Sensor.UNKNOWN)
        self.add_sensor(self._firmware_server_sensor)
        self._parent.mass_inform(Message.inform('interface-changed'))

    @state_change(["capturing", "error"], "idle")
    @coroutine
    def deconfigure(self):
        """
        Deconfigure the product

        This method will remove any product sensors that were added to the
        parent master controller.
        """
        yield self._r2rm_client.until_synced(2)
        response = yield self._r2rm_client.req.force_deconfigure_board(
            self._icom_id)
        if not response.reply.reply_ok():
            self.log.error("Error on deconfigure request: {}".format(
                response.reply.arguments[1]))
            raise EddRoach2ProductError(response.reply.arguments[1])
        self.teardown_sensors()
        self._firmware = None
        self._icom_id = None

    @state_change(["idle", "error"], "capturing", "preparing")
    @coroutine
    def configure(self, config):
        """
        Configure the roach2 product

        Args:
            config:  A dictionary containing configuration information.
                     The dictionary should have a form similar to::

                          {
                              "id": "roach2_spectrometer",
                              "type": "roach2",
                              "icom_id": "R2-EDD",
                              "firmware": "EDDFirmware",
                              "commands":
                              [
                                  ["program", []],
                                  ["start", []],
                                  ["set_integration_period", [1000.0]],
                                  ["set_destination_address", ["10.10.1.12", 60001]]
                              ]
                          }

        This method will request the specified roach2 board from the R2RM server
        and request a firmware deployment. The values of the 'icom_id' and 'firmware'
        must correspond to valid managed roach2 boards and firmwares as understood by
        the R2RM server.
        """
        log.debug("Syncing with R2RM server")
        yield self._r2rm_client.until_synced(2)
        self._icom_id = config["icom_id"]
        self._firmware = config["firmware"]
        log.debug("Trying to force deconfiguring board")
        response = yield self._r2rm_client.req.force_deconfigure_board(
            self._icom_id)
        if not response.reply.reply_ok():
            self.log.warning("Unable to deconfigure ROACH2 board: {}".format(
                response.reply.arguments[1]))
        log.debug("Sending configure request to R2RM server")
        response = yield self._r2rm_client.req.configure_board(self._icom_id,
                                                               EDD_R2RM_USER,
                                                               self._firmware,
                                                               timeout=20)
        if not response.reply.reply_ok():
            self.log.error("Error on configure request: {}".format(
                response.reply.arguments[1]))
            raise EddRoach2ProductError(response.reply.arguments[1])
        _, firmware_ip, firmware_port = response.reply.arguments
        log.debug(
            "Connecting client to activated firmware server @ {}:{}".format(
                firmware_ip, firmware_port))
        firmware_client = KATCPClientResource(
            dict(name="firmware-client",
                 address=(firmware_ip, firmware_port),
                 controlled=True))
        firmware_client.start()
        log.debug("Syncing with firmware client")
        yield firmware_client.until_synced(2)
        for command, args in config["commands"]:
            log.debug(
                "Sending firmware server request '{}' with args '{}'".format(
                    command, args))
            response = yield firmware_client.req[command](*args, timeout=20)
            if not response.reply.reply_ok():
                self.log.error("Error on {}->{} request: {}".format(
                    command, args, response.reply.arguments[1]))
                raise EddRoach2ProductError(response.reply.arguments[1])
        log.debug("Stopping client connection to firmware server")
        firmware_client.stop()

    @coroutine
    def capture_start(self):
        """
        A no-op method for supporting the product controller interface.
        """
        pass

    @coroutine
    def capture_stop(self):
        """
        A no-op method for supporting the product controller interface.
        """
        pass
Example #17
    def configure(self, config):
        """
        Configure the roach2 product

        Args:
            config:  A dictionary containing configuration information.
                     The dictionary should have a form similar to::

                          {
                              "id": "roach2_spectrometer",
                              "type": "roach2",
                              "icom_id": "R2-EDD",
                              "firmware": "EDDFirmware",
                              "commands":
                              [
                                  ["program", []],
                                  ["start", []],
                                  ["set_integration_period", [1000.0]],
                                  ["set_destination_address", ["10.10.1.12", 60001]]
                              ]
                          }

        This method will request the specified roach2 board from the R2RM server
        and request a firmware deployment. The values of the 'icom_id' and 'firmware'
        must correspond to valid managed roach2 boards and firmwares as understood by
        the R2RM server.
        """
        log.debug("Syncing with R2RM server")
        yield self._r2rm_client.until_synced(2)
        self._icom_id = config["icom_id"]
        self._firmware = config["firmware"]
        log.debug("Trying to force deconfiguring board")
        response = yield self._r2rm_client.req.force_deconfigure_board(
            self._icom_id)
        if not response.reply.reply_ok():
            self.log.warning("Unable to deconfigure ROACH2 board: {}".format(
                response.reply.arguments[1]))
        log.debug("Sending configure request to R2RM server")
        response = yield self._r2rm_client.req.configure_board(self._icom_id,
                                                               EDD_R2RM_USER,
                                                               self._firmware,
                                                               timeout=20)
        if not response.reply.reply_ok():
            self.log.error("Error on configure request: {}".format(
                response.reply.arguments[1]))
            raise EddRoach2ProductError(response.reply.arguments[1])
        _, firmware_ip, firmware_port = response.reply.arguments
        log.debug(
            "Connecting client to activated firmware server @ {}:{}".format(
                firmware_ip, firmware_port))
        firmware_client = KATCPClientResource(
            dict(name="firmware-client",
                 address=(firmware_ip, firmware_port),
                 controlled=True))
        firmware_client.start()
        log.debug("Syncing with firmware client")
        yield firmware_client.until_synced(2)
        for command, args in config["commands"]:
            log.debug(
                "Sending firmware server request '{}' with args '{}'".format(
                    command, args))
            response = yield firmware_client.req[command](*args, timeout=20)
            if not response.reply.reply_ok():
                self.log.error("Error on {}->{} request: {}".format(
                    command, args, response.reply.arguments[1]))
                raise EddRoach2ProductError(response.reply.arguments[1])
        log.debug("Stopping client connection to firmware server")
        firmware_client.stop()
Example #18
class FbfProductController(object):
    """
    Wrapper class for an FBFUSE product.
    """
    STATES = ["idle", "preparing", "ready", "starting", "capturing", "stopping", "error"]
    IDLE, PREPARING, READY, STARTING, CAPTURING, STOPPING, ERROR = STATES

    def __init__(self, parent, product_id, katpoint_antennas,
                 n_channels, feng_streams, proxy_name, feng_config):
        """
        @brief      Construct new instance

        @param      parent            The parent FbfMasterController instance

        @param      product_id        The name of the product

        @param      katpoint_antennas A list of katpoint.Antenna objects

        @param      n_channels        The integer number of frequency channels provided by the CBF.

        @param      feng_streams      A string describing the multicast groups containing F-engine data
                                      (in the form: spead://239.11.1.150+15:7147)

        @param      proxy_name        The name of the proxy associated with this subarray (used as a sensor prefix)

        @param      feng_config       A dictionary of F-engine configuration parameters
                                      (e.g. bandwidth, centre-frequency, sync-epoch, feng-antenna-map)
        """
        self.log = logging.getLogger("mpikat.fbfuse_product_controller.{}".format(product_id))
        self.log.debug("Creating new FbfProductController with args: {}".format(
            ", ".join([str(i) for i in (parent, product_id, katpoint_antennas, n_channels,
                feng_streams, proxy_name, feng_config)])))
        self._parent = parent
        self._product_id = product_id
        self._antennas = ",".join([a.name for a in katpoint_antennas])
        self._katpoint_antennas = katpoint_antennas
        self._antenna_map = {a.name: a for a in self._katpoint_antennas}
        self._n_channels = n_channels
        self._streams = ip_range_from_stream(feng_streams)
        self._proxy_name = proxy_name
        self._feng_config = feng_config
        self._servers = []
        self._beam_manager = None
        self._delay_config_server = None
        self._ca_client = None
        self._previous_sb_config = None
        self._managed_sensors = []
        self._ibc_mcast_group = None
        self._cbc_mcast_groups = None
        self._default_sb_config = {
            u'coherent-beams-nbeams':400,
            u'coherent-beams-tscrunch':16,
            u'coherent-beams-fscrunch':1,
            u'coherent-beams-antennas':self._antennas,
            u'coherent-beams-granularity':6,
            u'incoherent-beam-tscrunch':16,
            u'incoherent-beam-fscrunch':1,
            u'incoherent-beam-antennas':self._antennas,
            u'bandwidth':self._feng_config['bandwidth'],
            u'centre-frequency':self._feng_config['centre-frequency']}
        self.setup_sensors()

    def __del__(self):
        self.teardown_sensors()

    def info(self):
        """
        @brief    Return a metadata dictionary describing this product controller
        """
        out = {
            "antennas":self._antennas,
            "nservers":len(self.servers),
            "capturing":self.capturing,
            "streams":self._streams,
            "nchannels":self._n_channels,
            "proxy_name":self._proxy_name
        }
        return out

    def add_sensor(self, sensor):
        """
        @brief    Add a sensor to the parent object

        @note     This method is used to wrap calls to the add_sensor method
                  on the parent FbfMasterController instance. In order to
                  disambiguate between sensors describing different products,
                  the associated proxy name is used as a sensor prefix. For example,
                  the "servers" sensor will be seen by clients connected to the
                  FbfMasterController server as "<proxy_name>-servers" (e.g.
                  "FBFUSE_1-servers").
        """
        prefix = "{}.".format(self._product_id)
        if sensor.name.startswith(prefix):
            self._parent.add_sensor(sensor)
        else:
            sensor.name = "{}{}".format(prefix,sensor.name)
            self._parent.add_sensor(sensor)
        self._managed_sensors.append(sensor)

    def setup_sensors(self):
        """
        @brief    Setup the default KATCP sensors.

        @note     As this call is made only upon an FBFUSE configure call a mass inform
                  is required to let connected clients know that the proxy interface has
                  changed.
        """
        self._state_sensor = LoggingSensor.discrete(
            "state",
            description = "Denotes the state of this FBF instance",
            params = self.STATES,
            default = self.IDLE,
            initial_status = Sensor.NOMINAL)
        self._state_sensor.set_logger(self.log)
        self.add_sensor(self._state_sensor)

        self._ca_address_sensor = Sensor.string(
            "configuration-authority",
            description = "The address of the server that will be deferred to for configurations",
            default = "",
            initial_status = Sensor.UNKNOWN)
        self.add_sensor(self._ca_address_sensor)

        self._available_antennas_sensor = Sensor.string(
            "available-antennas",
            description = "The antennas that are currently available for beamforming",
            default = json.dumps({antenna.name:antenna.format_katcp() for antenna in self._katpoint_antennas}),
            initial_status = Sensor.NOMINAL)
        self.add_sensor(self._available_antennas_sensor)

        self._phase_reference_sensor = Sensor.string(
            "phase-reference",
            description="A KATPOINT target string denoting the F-engine phasing centre",
            default="unset,radec,0,0",
            initial_status=Sensor.UNKNOWN)
        self.add_sensor(self._phase_reference_sensor)

        reference_antenna = Antenna("reference,{ref.lat},{ref.lon},{ref.elev}".format(
            ref=self._katpoint_antennas[0].ref_observer))
        self._reference_antenna_sensor = Sensor.string(
            "reference-antenna",
            description="A KATPOINT antenna string denoting the reference antenna",
            default=reference_antenna.format_katcp(),
            initial_status=Sensor.NOMINAL)
        self.add_sensor(self._reference_antenna_sensor)

        self._bandwidth_sensor = Sensor.float(
            "bandwidth",
            description = "The bandwidth this product is configured to process",
            default = self._default_sb_config['bandwidth'],
            initial_status = Sensor.UNKNOWN)
        self.add_sensor(self._bandwidth_sensor)

        self._nchans_sensor = Sensor.integer(
            "nchannels",
            description = "The number of channels to be processed",
            default = self._n_channels,
            initial_status = Sensor.UNKNOWN)
        self.add_sensor(self._nchans_sensor)

        self._cfreq_sensor = Sensor.float(
            "centre-frequency",
            description = "The centre frequency of the band this product is configured to process",
            default = self._default_sb_config['centre-frequency'],
            initial_status = Sensor.UNKNOWN)
        self.add_sensor(self._cfreq_sensor)

        self._cbc_nbeams_sensor = Sensor.integer(
            "coherent-beam-count",
            description = "The number of coherent beams that this FBF instance can currently produce",
            default = self._default_sb_config['coherent-beams-nbeams'],
            initial_status = Sensor.UNKNOWN)
        self.add_sensor(self._cbc_nbeams_sensor)

        self._cbc_nbeams_per_group = Sensor.integer(
            "coherent-beam-count-per-group",
            description = "The number of coherent beams packed into a multicast group",
            default = 1,
            initial_status = Sensor.UNKNOWN)
        self.add_sensor(self._cbc_nbeams_per_group)

        self._cbc_ngroups = Sensor.integer(
            "coherent-beam-ngroups",
            description = "The number of multicast groups used for coherent beam transmission",
            default = 1,
            initial_status = Sensor.UNKNOWN)
        self.add_sensor(self._cbc_ngroups)

        self._cbc_nbeams_per_server_set = Sensor.integer(
            "coherent-beam-nbeams-per-server-set",
            description = "The number of beams produced by each server set",
            default = 1,
            initial_status = Sensor.UNKNOWN)
        self.add_sensor(self._cbc_nbeams_per_server_set)

        self._cbc_tscrunch_sensor = Sensor.integer(
            "coherent-beam-tscrunch",
            description = "The number of time samples that will be integrated when producing coherent beams",
            default = self._default_sb_config['coherent-beams-tscrunch'],
            initial_status = Sensor.UNKNOWN)
        self.add_sensor(self._cbc_tscrunch_sensor)

        self._cbc_fscrunch_sensor = Sensor.integer(
            "coherent-beam-fscrunch",
            description = "The number of frequency channels that will be integrated when producing coherent beams",
            default = self._default_sb_config['coherent-beams-fscrunch'],
            initial_status = Sensor.UNKNOWN)
        self.add_sensor(self._cbc_fscrunch_sensor)

        self._cbc_antennas_sensor = Sensor.string(
            "coherent-beam-antennas",
            description = "The antennas that will be used when producing coherent beams",
            default = self._default_sb_config['coherent-beams-antennas'],
            initial_status = Sensor.UNKNOWN)
        self.add_sensor(self._cbc_antennas_sensor)

        self._cbc_mcast_groups_sensor = Sensor.string(
            "coherent-beam-multicast-groups",
            description = "Multicast groups used by this instance for sending coherent beam data",
            default = "",
            initial_status = Sensor.UNKNOWN)
        self.add_sensor(self._cbc_mcast_groups_sensor)

        self._cbc_mcast_groups_mapping_sensor = Sensor.string(
            "coherent-beam-multicast-group-mapping",
            description = "Mapping of multicast group address to the coherent beams in that group",
            default= "",
            initial_status = Sensor.UNKNOWN)
        self.add_sensor(self._cbc_mcast_groups_mapping_sensor)

        self._ibc_nbeams_sensor = Sensor.integer(
            "incoherent-beam-count",
            description = "The number of incoherent beams that this FBF instance can currently produce",
            default = 1,
            initial_status = Sensor.UNKNOWN)
        self.add_sensor(self._ibc_nbeams_sensor)

        self._ibc_tscrunch_sensor = Sensor.integer(
            "incoherent-beam-tscrunch",
            description = "The number of time samples that will be integrated when producing incoherent beams",
            default = self._default_sb_config['incoherent-beam-tscrunch'],
            initial_status = Sensor.UNKNOWN)
        self.add_sensor(self._ibc_tscrunch_sensor)

        self._ibc_fscrunch_sensor = Sensor.integer(
            "incoherent-beam-fscrunch",
            description = "The number of frequency channels that will be integrated when producing incoherent beams",
            default = self._default_sb_config['incoherent-beam-fscrunch'],
            initial_status = Sensor.UNKNOWN)
        self.add_sensor(self._ibc_fscrunch_sensor)

        self._ibc_antennas_sensor = Sensor.string(
            "incoherent-beam-antennas",
            description = "The antennas that will be used when producing incoherent beams",
            default = self._default_sb_config['incoherent-beam-antennas'],
            initial_status = Sensor.UNKNOWN)
        self.add_sensor(self._ibc_antennas_sensor)

        self._ibc_mcast_group_sensor = Sensor.string(
            "incoherent-beam-multicast-group",
            description = "Multicast group used by this instance for sending incoherent beam data",
            default = "",
            initial_status = Sensor.UNKNOWN)
        self.add_sensor(self._ibc_mcast_group_sensor)

        self._servers_sensor = Sensor.string(
            "servers",
            description = "The worker server instances currently allocated to this product",
            default = ",".join(["{s.hostname}:{s.port}".format(s=server) for server in self._servers]),
            initial_status = Sensor.UNKNOWN)
        self.add_sensor(self._servers_sensor)

        self._nserver_sets_sensor = Sensor.integer(
            "nserver-sets",
            description = "The number of server sets (independent subscriptions to the F-engines)",
            default = 1,
            initial_status = Sensor.UNKNOWN)
        self.add_sensor(self._nserver_sets_sensor)

        self._nservers_per_set_sensor = Sensor.integer(
            "nservers-per-set",
            description = "The number of servers per server set",
            default = 1,
            initial_status = Sensor.UNKNOWN)
        self.add_sensor(self._nservers_per_set_sensor)

        self._delay_config_server_sensor = Sensor.string(
            "delay-config-server",
            description = "The address of the delay configuration server for this product",
            default = "",
            initial_status = Sensor.UNKNOWN)
        self.add_sensor(self._delay_config_server_sensor)

    def teardown_sensors(self):
        """
        @brief    Remove all sensors created by this product from the parent server.

        @note     This method is required for cleanup to stop the FBF sensor pool
                  becoming swamped with unused sensors.
        """
        for sensor in self._managed_sensors:
            self._parent.remove_sensor(sensor)
        self._managed_sensors = []
        self._parent.mass_inform(Message.inform('interface-changed'))

    @property
    def servers(self):
        return self._servers

    @property
    def capturing(self):
        return self.state == self.CAPTURING

    @property
    def idle(self):
        return self.state == self.IDLE

    @property
    def starting(self):
        return self.state == self.STARTING

    @property
    def stopping(self):
        return self.state == self.STOPPING

    @property
    def ready(self):
        return self.state == self.READY

    @property
    def preparing(self):
        return self.state == self.PREPARING

    @property
    def error(self):
        return self.state == self.ERROR

    @property
    def state(self):
        return self._state_sensor.value()

    def _verify_antennas(self, antennas):
        """
        @brief      Verify that a set of antennas is available to this instance.

        @param      antennas   A CSV list of antenna names
        """
        self.log.debug("Verifying antenna set: {}".format(antennas))
        antennas_set = set([ant.name for ant in self._katpoint_antennas])
        requested_antennas = set(antennas)
        return requested_antennas.issubset(antennas_set)

    def set_configuration_authority(self, hostname, port):
        if self._ca_client:
            self._ca_client.stop()
        self._ca_client = KATCPClientResource(dict(
            name = 'configuration-authority-client',
            address = (hostname, port),
            controlled = True))
        self._ca_client.start()
        self._ca_address_sensor.set_value("{}:{}".format(hostname, port))

    @coroutine
    def get_ca_sb_configuration(self, sb_id):
        self.log.debug("Retrieving schedule block configuration from configuration authority")
        yield self._ca_client.until_synced()
        try:
            response = yield self._ca_client.req.get_schedule_block_configuration(self._proxy_name, sb_id)
        except Exception as error:
            self.log.error("Request for SB configuration to CA failed with error: {}".format(str(error)))
            raise error
        try:
            config_dict = json.loads(response.reply.arguments[1])
        except Exception as error:
            self.log.error("Could not parse CA SB configuration with error: {}".format(str(error)))
            raise error
        self.log.debug("Configuration authority returned: {}".format(config_dict))
        raise Return(config_dict)

    def reset_sb_configuration(self):
        self.log.debug("Resetting schedule block configuration")
        try:
            self.capture_stop()
        except Exception as error:
            self.log.warning("Received error while attempting capture stop: {}".format(str(error)))
        self._parent._server_pool.deallocate(self._servers)

        if self._ibc_mcast_group:
            self._parent._ip_pool.free(self._ibc_mcast_group)
        if self._cbc_mcast_groups:
            self._parent._ip_pool.free(self._cbc_mcast_groups)
        self._cbc_mcast_groups = None
        self._ibc_mcast_group = None
        self._servers = []
        if self._delay_config_server:
            self._delay_config_server.stop()
            self._delay_config_server = None
        self._beam_manager = None

    def set_error_state(self, message):
        self.reset_sb_configuration()
        self._state_sensor.set_value(self.ERROR)

    def set_sb_configuration(self, config_dict):
        """
        @brief  Set the schedule block configuration for this product

        @param  config_dict  A dictionary specifying configuration parameters, e.g.
                             @code
                                   {
                                   u'coherent-beams-nbeams':100,
                                   u'coherent-beams-tscrunch':22,
                                   u'coherent-beams-fscrunch':2,
                                   u'coherent-beams-antennas':'m007',
                                   u'coherent-beams-granularity':6,
                                   u'incoherent-beam-tscrunch':16,
                                   u'incoherent-beam-fscrunch':1,
                                   u'incoherent-beam-antennas':'m008'
                                   }
                             @endcode

        @detail Valid parameters for the configuration dictionary are as follows:

                 coherent-beams-nbeams      - The desired number of coherent beams to produce
                 coherent-beams-tscrunch    - The number of spectra to integrate in the coherent beamformer
                 coherent-beams-fscrunch    - The number of channels to integrate in the coherent beamformer
                 coherent-beams-antennas    - The specific antennas to use for the coherent beamformer
                 coherent-beams-granularity - The number of beams per output multicast group
                                              (an integer divisor or multiplier of this number will be used)
                 incoherent-beam-tscrunch   - The number of spectra to integrate in the incoherent beamformer
                 incoherent-beam-fscrunch   - The number of channels to integrate in the incoherent beamformer
                 incoherent-beam-antennas   - The specific antennas to use for the incoherent beamformer
                 centre-frequency           - The desired centre frequency in Hz
                 bandwidth                  - The desired bandwidth in Hz

        @note   FBFUSE reasonably assumes that the user does not know the possible configurations at
                any given time. As such it tries to satisfy the user's request but will not throw an
                error if the requested configuration is not achievable, instead opting to provide a
                reduced configuration. For example, the user may request 1000 beams and 6 beams per
                multicast group, but FBFUSE may configure to produce 860 beams and 24 beams per multicast
                group. If the user can only use 6 beams per multicast group, then in the 24-beam case
                they must subscribe to the same multicast group 4 times on different nodes.

        """
        if self._previous_sb_config == config_dict:
            self.log.info("Configuration is unchanged, proceeding with existing configuration")
            return
        else:
            self._previous_sb_config = config_dict
        self.reset_sb_configuration()
        self.log.info("Setting schedule block configuration")
        config = deepcopy(self._default_sb_config)
        config.update(config_dict)
        self.log.info("Configuring using: {}".format(config))
        requested_cbc_antenna = parse_csv_antennas(config['coherent-beams-antennas'])
        if not self._verify_antennas(requested_cbc_antenna):
            raise Exception("Requested coherent beam antennas are not a subset of the available antennas")
        requested_ibc_antenna = parse_csv_antennas(config['incoherent-beam-antennas'])
        if not self._verify_antennas(requested_ibc_antenna):
            raise Exception("Requested incoherent beam antennas are not a subset of the available antennas")
        # first we need to get one ip address for the incoherent beam
        self._ibc_mcast_group = self._parent._ip_pool.allocate(1)
        self._ibc_mcast_group_sensor.set_value(self._ibc_mcast_group.format_katcp())
        largest_ip_range = self._parent._ip_pool.largest_free_range()
        nworkers_available = self._parent._server_pool.navailable()
        cm = FbfConfigurationManager(len(self._katpoint_antennas),
            self._feng_config['bandwidth'], self._n_channels,
            nworkers_available, largest_ip_range)
        requested_nantennas = len(parse_csv_antennas(config['coherent-beams-antennas']))
        mcast_config = cm.get_configuration(
            config['coherent-beams-tscrunch'],
            config['coherent-beams-fscrunch'],
            config['coherent-beams-nbeams'],
            requested_nantennas,
            config['bandwidth'],
            config['coherent-beams-granularity'])
        self._bandwidth_sensor.set_value(config['bandwidth'])
        self._cfreq_sensor.set_value(config['centre-frequency'])
        self._nchans_sensor.set_value(mcast_config['num_chans'])
        self._cbc_nbeams_sensor.set_value(mcast_config['num_beams'])
        self._cbc_nbeams_per_group.set_value(mcast_config['num_beams_per_mcast_group'])
        self._cbc_ngroups.set_value(mcast_config['num_mcast_groups'])
        self._cbc_nbeams_per_server_set.set_value(mcast_config['num_beams_per_worker_set'])
        self._cbc_tscrunch_sensor.set_value(config['coherent-beams-tscrunch'])
        self._cbc_fscrunch_sensor.set_value(config['coherent-beams-fscrunch'])
        self._cbc_antennas_sensor.set_value(config['coherent-beams-antennas'])
        self._ibc_tscrunch_sensor.set_value(config['incoherent-beam-tscrunch'])
        self._ibc_fscrunch_sensor.set_value(config['incoherent-beam-fscrunch'])
        self._ibc_antennas_sensor.set_value(config['incoherent-beam-antennas'])
        self._servers = self._parent._server_pool.allocate(mcast_config['num_workers_total'])
        server_str = ",".join(["{s.hostname}:{s.port}".format(s=server) for server in self._servers])
        self._servers_sensor.set_value(server_str)
        self._nserver_sets_sensor.set_value(mcast_config['num_worker_sets'])
        self._nservers_per_set_sensor.set_value(mcast_config['num_workers_per_set'])
        self._cbc_mcast_groups = self._parent._ip_pool.allocate(mcast_config['num_mcast_groups'])
        self._cbc_mcast_groups_sensor.set_value(self._cbc_mcast_groups.format_katcp())
        return cm

    @coroutine
    def get_ca_target_configuration(self, target):
        def ca_target_update_callback(received_timestamp, timestamp, status, value):
            # TODO: should we really reset all the beams or should we have
            # a mechanism to only update changed beams?
            config_dict = json.loads(value)
            self.reset_beams()
            for target_string in config_dict.get('beams',[]):
                target = Target(target_string)
                self.add_beam(target)
            for tiling in config_dict.get('tilings',[]):
                target  = Target(tiling['target']) #required
                freq    = float(tiling.get('reference_frequency', self._cfreq_sensor.value()))
                nbeams  = int(tiling['nbeams'])
                overlap = float(tiling.get('overlap', 0.5))
                epoch   = float(tiling.get('epoch', time.time()))
                self.add_tiling(target, nbeams, freq, overlap, epoch)
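        # The CA payload parsed above is assumed to look like (hypothetical
        # values, not a confirmed schema):
        # {"beams": ["J0835-4510, radec, 08:35:20.6, -45:10:34.9"],
        #  "tilings": [{"target": "J0835-4510, radec, 08:35:20.6, -45:10:34.9",
        #               "nbeams": 37, "reference_frequency": 1.284e9,
        #               "overlap": 0.5}]}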
        yield self._ca_client.until_synced()
        try:
            response = yield self._ca_client.req.target_configuration_start(self._proxy_name, target.format_katcp())
        except Exception as error:
            self.log.error("Request for target configuration to CA failed with error: {}".format(str(error)))
            raise error
        if not response.reply.reply_ok():
            error = Exception(response.reply.arguments[1])
            self.log.error("Request for target configuration to CA failed with error: {}".format(str(error)))
            raise error
        yield self._ca_client.until_synced()
        sensor = self._ca_client.sensor["{}_beam_position_configuration".format(self._proxy_name)]
        sensor.register_listener(ca_target_update_callback)
        self._ca_client.set_sampling_strategy(sensor.name, "event")

    def _beam_to_sensor_string(self, beam):
        return beam.target.format_katcp()

    @coroutine
    def target_start(self, target):
        self._phase_reference_sensor.set_value(target)
        if self._ca_client:
            yield self.get_ca_target_configuration(target)
        else:
            self.log.warning("No configuration authority is set, using default beam configuration")

    @coroutine
    def target_stop(self):
        if self._ca_client:
            sensor_name = "{}_beam_position_configuration".format(self._proxy_name)
            self._ca_client.set_sampling_strategy(sensor_name, "none")

    @coroutine
    def prepare(self, sb_id):
        """
        @brief      Prepare the beamformer for streaming

        @detail     This method evaluates the current configuration, creates a new DelayEngine
                    and passes a prepare call to all allocated servers.

        @param      sb_id   The schedule block ID for which configuration should be fetched
                            from the configuration authority
        """
        if not self.idle:
            raise FbfProductStateError([self.IDLE], self.state)
        self.log.info("Preparing FBFUSE product")
        self._state_sensor.set_value(self.PREPARING)
        self.log.debug("Product moved to 'preparing' state")
        # Here we need to parse the streams and assign beams to streams:
        #mcast_addrs, mcast_port = parse_stream(self._streams['cbf.antenna_channelised_voltage']['i0.antenna-channelised-voltage'])

        if not self._ca_client:
            self.log.warning("No configuration authority found, using default configuration parameters")
            cm = self.set_sb_configuration(self._default_sb_config)
        else:
            #TODO: get the schedule block ID into this call from somewhere (configure?)
            try:
                config = yield self.get_ca_sb_configuration(sb_id)
                cm = self.set_sb_configuration(config)
            except Exception as error:
                self.log.error("Configuring from CA failed with error: {}".format(str(error)))
                self.log.warning("Reverting to default configuration")
                cm = self.set_sb_configuration(self._default_sb_config)

        cbc_antennas_names = parse_csv_antennas(self._cbc_antennas_sensor.value())
        cbc_antennas = [self._antenna_map[name] for name in cbc_antennas_names]
        self._beam_manager = BeamManager(self._cbc_nbeams_sensor.value(), cbc_antennas)
        self._delay_config_server = DelayConfigurationServer("127.0.0.1", 0, self._beam_manager)
        self._delay_config_server.start()
        self.log.info("Started delay engine at: {}".format(self._delay_config_server.bind_address))
        de_ip, de_port = self._delay_config_server.bind_address
        self._delay_config_server_sensor.set_value((de_ip, de_port))

        # Need to tear down the beam sensors here
        # Here calculate the beam to multicast map
        self._beam_sensors = []
        mcast_to_beam_map = {}
        groups = [ip for ip in self._cbc_mcast_groups]
        idxs = [beam.idx for beam in self._beam_manager.get_beams()]
        for group in groups:
            key = str(group)
            self.log.debug("Allocating beams to {}".format(key))
            mcast_to_beam_map[key] = []
            for _ in range(self._cbc_nbeams_per_group.value()):
                value = idxs.pop(0)
                self.log.debug("--> Allocated {} to {}".format(value, key))
                mcast_to_beam_map[key].append(value)
        self._cbc_mcast_groups_mapping_sensor.set_value(json.dumps(mcast_to_beam_map))
        for beam in self._beam_manager.get_beams():
            sensor = Sensor.string(
                "coherent-beam-{}".format(beam.idx),
                description="R.A. (deg), declination (deg) and source name for coherent beam with ID {}".format(beam.idx),
                default=self._beam_to_sensor_string(beam),
                initial_status=Sensor.UNKNOWN)
            beam.register_observer(lambda beam, sensor=sensor:
                sensor.set_value(self._beam_to_sensor_string(beam)))
            self._beam_sensors.append(sensor)
            self.add_sensor(sensor)
        self._parent.mass_inform(Message.inform('interface-changed'))

        #Here we actually start to prepare the remote workers
        ip_splits = self._streams.split(N_FENG_STREAMS_PER_WORKER)

        # This assumes a lower sideband and that the bandwidth is always positive
        fbottom = self._feng_config['centre-frequency'] - self._feng_config['bandwidth']/2.
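        # e.g. with the defaults used elsewhere in this code base
        # (centre-frequency=1200e6, bandwidth=856e6):
        # fbottom = 1200e6 - 856e6/2 = 772 MHz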

        coherent_beam_config = {
            'tscrunch': self._cbc_tscrunch_sensor.value(),
            'fscrunch': self._cbc_fscrunch_sensor.value(),
            'antennas': self._cbc_antennas_sensor.value()
        }

        incoherent_beam_config = {
            'tscrunch': self._ibc_tscrunch_sensor.value(),
            'fscrunch': self._ibc_fscrunch_sensor.value(),
            'antennas': self._ibc_antennas_sensor.value()
        }

        prepare_futures = []
        for ii, (server, ip_range) in enumerate(zip(self._servers, ip_splits)):
            chan0_idx = cm.nchans_per_worker * ii
            chan0_freq =  fbottom + chan0_idx * cm.channel_bandwidth
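            # e.g. for worker ii=1 with 64 channels per worker and a channel
            # bandwidth of ~209 kHz (856e6/4096, illustrative numbers):
            # chan0_idx = 64 and chan0_freq = fbottom + ~13.4 MHz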
            future = server.prepare(ip_range.format_katcp(), cm.nchans_per_group,
                        chan0_idx, chan0_freq, cm.channel_bandwidth, mcast_to_beam_map,
                        self._feng_config['feng-antenna-map'], coherent_beam_config,
                        incoherent_beam_config, de_ip, de_port)
            prepare_futures.append(future)

        failure_count = 0
        for future in prepare_futures:
            try:
                yield future
            except Exception as error:
                log.error("Failed to configure server with error: {}".format(str(error)))
                failure_count += 1

        if failure_count > 0:
            self._state_sensor.set_value(self.ERROR)
            self.log.info("Failed to prepare FBFUSE product")
        else:
            self._state_sensor.set_value(self.READY)
            self.log.info("Successfully prepared FBFUSE product")

    def deconfigure(self):
        """
        @brief  Deconfigure the product. To be called on a subarray deconfigure.

        @detail This is the final cleanup operation for the product, it should delete all sensors
                and ensure the release of all resource allocations.
        """
        self.reset_sb_configuration()
        self.teardown_sensors()

    def capture_start(self):
        if not self.ready:
            raise FbfProductStateError([self.READY], self.state)
        self._state_sensor.set_value(self.STARTING)
        self.log.debug("Product moved to 'starting' state")
        """
        futures = []
        for server in self._servers:
            futures.append(server.req.start_capture())
        for future in futures:
            try:
                response = yield future
            except:
                pass
        """
        self._state_sensor.set_value(self.CAPTURING)
        self.log.debug("Product moved to 'capturing' state")

    def capture_stop(self):
        """
        @brief      Stops the beamformer servers streaming.

        @detail     This should only be called on a schedule block reconfiguration
                    if the same configuration persists between schedule blocks then
                    it is preferable to continue streaming rather than stopping and
                    starting again.
        """
        if not self.capturing and not self.error:
            return
        self._state_sensor.set_value(self.STOPPING)
        self.target_stop()
        for server in self._servers:
            #yield server.req.deconfigure()
            pass
        self._state_sensor.set_value(self.IDLE)

    def add_beam(self, target):
        """
        @brief      Specify the parameters of one managed beam

        @param      target      A KATPOINT target object

        @return     Returns the allocated Beam object
        """
        valid_states = [self.READY, self.CAPTURING, self.STARTING]
        if not self.state in valid_states:
            raise FbfProductStateError(valid_states, self.state)
        return self._beam_manager.add_beam(target)

    def add_tiling(self, target, number_of_beams, reference_frequency, overlap, epoch):
        """
        @brief   Add a tiling to be managed

        @param      target      A KATPOINT target object

        @param      number_of_beams     The number of beams to generate in the tiling

        @param      reference_frequency     The reference frequency at which to calculate the synthesised beam shape,
                                            and thus the tiling pattern. Typically this would be chosen to be the
                                            centre frequency of the current observation.

        @param      overlap         The desired overlap point between beams in the pattern. The overlap defines
                                    at what power point neighbouring beams in the tiling pattern will meet. For
                                    example, an overlap point of 0.1 corresponds to beams overlapping only at their
                                    10%-power points. Similarly, an overlap of 0.5 corresponds to beams overlapping
                                    at their half-power points. [Note: This is currently a tricky parameter to use
                                    when values are close to zero. In future this may be defined in sigma units or
                                    in multiples of the FWHM of the beam.]

        @param      epoch       The epoch (unix time) at which to generate the tiling pattern

        @returns    The created Tiling object
        """
        valid_states = [self.READY, self.CAPTURING, self.STARTING]
        if not self.state in valid_states:
            raise FbfProductStateError(valid_states, self.state)
        tiling = self._beam_manager.add_tiling(target, number_of_beams, reference_frequency, overlap)
        try:
            tiling.generate(self._katpoint_antennas, epoch)
        except Exception as error:
            self.log.error("Failed to generate tiling pattern with error: {}".format(str(error)))
        return tiling

    def reset_beams(self):
        """
        @brief  Reset and deallocate all beams and tilings managed by this instance

        @note   All tilings will be lost on this call and must be remade for subsequent observations
        """
        valid_states = [self.READY, self.CAPTURING, self.STARTING]
        if not self.state in valid_states:
            raise FbfProductStateError(valid_states, self.state)
        self._beam_manager.reset()
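
The beam and tiling API above can be exercised as in the following minimal
sketch. The `product` argument is a hypothetical, already-READY product
controller instance (an assumption for illustration); the Target string
follows the katpoint catalogue format.

    from katpoint import Target
    import time

    def request_vela_tiling(product):
        # `product` is assumed to be an FbfProductController-like instance
        # in the READY state (hypothetical for illustration)
        target = Target("J0835-4510, radec, 08:35:20.6, -45:10:34.9")
        product.add_beam(target)              # single beam on the target
        return product.add_tiling(
            target,
            number_of_beams=37,               # beams in the tiling pattern
            reference_frequency=1.284e9,      # Hz; sets synthesised beam shape
            overlap=0.5,                      # neighbouring beams meet at half power
            epoch=time.time())                # generate the pattern for "now"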
Example No. 19
0
class FbfWorkerServer(AsyncDeviceServer):
    VERSION_INFO = ("fbf-control-server-api", 0, 1)
    BUILD_INFO = ("fbf-control-server-implementation", 0, 1, "rc1")
    DEVICE_STATUSES = ["ok", "degraded", "fail"]
    STATES = [
        "idle", "preparing", "ready", "starting", "capturing", "stopping",
        "error"
    ]
    IDLE, PREPARING, READY, STARTING, CAPTURING, STOPPING, ERROR = STATES

    def __init__(self, ip, port, capture_interface, numa_node, exec_mode=FULL):
        """
        @brief       Construct new FbfWorkerServer instance

        @param  ip                 The interface address on which the server should listen
        @param  port               The port that the server should bind to
        @param  capture_interface  The IP address of the NIC to be used for data capture
        @param  numa_node          The NUMA node this worker instance is bound to
        @param  exec_mode          The execution mode (FULL enables launching of subprocesses
                                   and DADA buffer management)

        """
        self._dc_ip = None
        self._dc_port = None
        self._delay_client = None
        self._delays = None
        self._numa = numa_node
        self._exec_mode = exec_mode
        self._dada_input_key = "dada"
        self._dada_coh_output_key = "caca"
        self._dada_incoh_output_key = "baba"
        self._capture_interface = capture_interface
        self._capture_monitor = None

        self._input_level = 10.0
        self._output_level = 10.0
        self._partition_bandwidth = None
        self._centre_frequency = None

        super(FbfWorkerServer, self).__init__(ip, port)

    @coroutine
    def start(self):
        """Start FbfWorkerServer server"""
        super(FbfWorkerServer, self).start()

    @coroutine
    def stop(self):
        yield super(FbfWorkerServer, self).stop()

    def setup_sensors(self):
        """
        @brief    Set up monitoring sensors.

        Sensor list:
        - device-status
        - local-time-synced
        - fbf0-status
        - fbf1-status

        @note     The following sensors are made available on top of default
                  sensors implemented in AsyncDeviceServer and its base classes.

                  device-status:      Reports the health status of the FBFUSE
                                      and associated devices:
                                      Among other things report HW failure, SW
                                      failure and observation failure.
        """
        self._device_status_sensor = Sensor.discrete(
            "device-status",
            description="Health status of FbfWorkerServer instance",
            params=self.DEVICE_STATUSES,
            default="ok",
            initial_status=Sensor.NOMINAL)
        self.add_sensor(self._device_status_sensor)

        self._state_sensor = LoggingSensor.discrete(
            "state",
            params=self.STATES,
            description="The current state of this worker instance",
            default=self.IDLE,
            initial_status=Sensor.NOMINAL)
        self._state_sensor.set_logger(log)
        self.add_sensor(self._state_sensor)

        self._capture_interface_sensor = Sensor.string(
            "capture-interface",
            description="The IP address of the NIC to be used for data capture",
            default=self._capture_interface,
            initial_status=Sensor.NOMINAL)
        self.add_sensor(self._capture_interface_sensor)

        self._delay_client_sensor = Sensor.string(
            "delay-engine-server",
            description="The address of the currently set delay engine",
            default="",
            initial_status=Sensor.UNKNOWN)
        self.add_sensor(self._delay_client_sensor)

        self._antenna_capture_order_sensor = Sensor.string(
            "antenna-capture-order",
            description=
            "The order in which the worker will capture antennas internally",
            default="",
            initial_status=Sensor.UNKNOWN)
        self.add_sensor(self._antenna_capture_order_sensor)

        self._mkrecv_header_sensor = Sensor.string(
            "mkrecv-capture-header",
            description=
            "The MKRECV/DADA header used for configuring capture with MKRECV",
            default="",
            initial_status=Sensor.UNKNOWN)
        self.add_sensor(self._mkrecv_header_sensor)

        self._mksend_coh_header_sensor = Sensor.string(
            "mksend-coherent-beam-header",
            description=
            "The MKSEND/DADA header used for configuring transmission of coherent beam data",
            default="",
            initial_status=Sensor.UNKNOWN)
        self.add_sensor(self._mksend_coh_header_sensor)

        self._mksend_incoh_header_sensor = Sensor.string(
            "mksend-incoherent-beam-header",
            description=
            "The MKSEND/DADA header used for configuring transmission of incoherent beam data",
            default="",
            initial_status=Sensor.UNKNOWN)
        self.add_sensor(self._mksend_incoh_header_sensor)

        self._psrdada_cpp_args_sensor = Sensor.string(
            "psrdada-cpp-arguments",
            description="The command line arguments used to invoke psrdada_cpp",
            default="",
            initial_status=Sensor.UNKNOWN)
        self.add_sensor(self._psrdada_cpp_args_sensor)

        self._mkrecv_heap_loss = Sensor.float(
            "feng-heap-loss",
            description=("The percentage if F-engine heaps lost "
                         "(within MKRECV statistics window)"),
            default=0.0,
            initial_status=Sensor.UNKNOWN,
            unit="%")
        self.add_sensor(self._mkrecv_heap_loss)

        self._ingress_buffer_percentage = Sensor.float(
            "ingress-buffer-fill-level",
            description=("The percentage fill level for the capture"
                         "buffer between MKRECV and PSRDADA_CPP"),
            default=0.0,
            initial_status=Sensor.UNKNOWN,
            unit="%")
        self.add_sensor(self._ingress_buffer_percentage)

        self._cb_egress_buffer_percentage = Sensor.float(
            "cb-egress-buffer-fill-level",
            description=("The percentage fill level for the transmission"
                         "buffer between PSRDADA_CPP and MKSEND (for "
                         "coherent beams)"),
            default=0.0,
            initial_status=Sensor.UNKNOWN,
            unit="%")
        self.add_sensor(self._cb_egress_buffer_percentage)

        self._ib_egress_buffer_percentage = Sensor.float(
            "ib-egress-buffer-fill-level",
            description=("The percentage fill level for the transmission"
                         "buffer between PSRDADA_CPP and MKSEND (for "
                         "incoherent beams)"),
            default=0.0,
            initial_status=Sensor.UNKNOWN,
            unit="%")
        self.add_sensor(self._ib_egress_buffer_percentage)

    @property
    def capturing(self):
        return self.state == self.CAPTURING

    @property
    def idle(self):
        return self.state == self.IDLE

    @property
    def starting(self):
        return self.state == self.STARTING

    @property
    def stopping(self):
        return self.state == self.STOPPING

    @property
    def ready(self):
        return self.state == self.READY

    @property
    def preparing(self):
        return self.state == self.PREPARING

    @property
    def error(self):
        return self.state == self.ERROR

    @property
    def state(self):
        return self._state_sensor.value()

    def _system_call_wrapper(self, cmd):
        log.debug("System call: '{}'".format(" ".join(cmd)))
        check_call(cmd)

    @coroutine
    def _make_db(self, key, block_size, nblocks, timeout=120):
        try:
            yield self._destroy_db(key, timeout=20)
        except Exception as error:
            log.debug("Could not clean previous buffer (key={}): {}".format(
                key, str(error)))
        log.debug(("Building DADA buffer: key={}, block_size={}, "
                   "nblocks={}").format(key, block_size, nblocks))
        if self._exec_mode == FULL:
            cmdline = map(str, [
                "dada_db", "-k", key, "-b", block_size, "-n", nblocks, "-l",
                "-p"
            ])
            proc = Popen(cmdline,
                         stdout=PIPE,
                         stderr=PIPE,
                         shell=False,
                         close_fds=True)
            yield process_watcher(proc,
                                  name="make_db({})".format(key),
                                  timeout=timeout)
        else:
            log.warning(("Current execution mode disables "
                         "DADA buffer creation/destruction"))

    @coroutine
    def _destroy_db(self, key, timeout=20.0):
        log.debug("Destroying DADA buffer with key={}".format(key))
        if self._exec_mode == FULL:
            cmdline = map(str, ["dada_db", "-k", key, "-d"])
            proc = Popen(cmdline,
                         stdout=PIPE,
                         stderr=PIPE,
                         shell=False,
                         close_fds=True)
            yield process_watcher(proc,
                                  name="destroy_db({})".format(key),
                                  timeout=timeout)
        else:
            log.warning(("Current execution mode disables "
                         "DADA buffer creation/destruction"))

    @coroutine
    def _reset_db(self, key, timeout=5.0):
        log.debug("Resetting DADA buffer with key={}".format(key))
        if self._exec_mode == FULL:
            cmdline = map(str, ["dbreset", "-k", key])
            proc = Popen(cmdline,
                         stdout=PIPE,
                         stderr=PIPE,
                         shell=False,
                         close_fds=True)
            yield process_watcher(proc,
                                  name="reset_db({})".format(key),
                                  timeout=timeout)
        else:
            log.warning(("Current execution mode disables "
                         "DADA buffer reset"))

    def set_affinity(self, pid, core_spec):
        log.debug("Setting affinity for PID {} to {}".format(pid, core_spec))
        os.system("taskset -cp {} {}".format(core_spec, pid))

    @request(Float(), Float())
    @return_reply()
    def request_set_levels(self, req, input_level, output_level):
        """
        @brief    Set the input and output levels for FBFUSE

        @param      req             A katcp request object

        @param    input_level  The standard deviation of the data
                               from the F-engines.

        @param    output_level  The standard deviation of the data
                                output from FBFUSE.
        """
        self._input_level = input_level
        self._output_level = output_level
        return ("ok", )

    @request(Str(), Int(), Int(), Float(), Float(), Str(), Str(), Str(), Str(),
             Int())
    @return_reply()
    def request_prepare(self, req, feng_groups, nchans_per_group, chan0_idx,
                        chan0_freq, chan_bw, feng_config, coherent_beam_config,
                        incoherent_beam_config, dc_ip, dc_port):
        """
        @brief      Prepare FBFUSE to receive and process data from a subarray

        @detail     REQUEST ?prepare feng_groups, nchans_per_group, chan0_idx, chan0_freq,
                        chan_bw, feng_config, coherent_beam_config, incoherent_beam_config,
                        dc_ip, dc_port
                    Prepare FBFUSE for the particular data products

        @param      req                 A katcp request object

        @param      feng_groups         The contiguous range of multicast groups to capture F-engine data from,
                                        the parameter is formatted in stream notation, e.g.: spead://239.11.1.150+3:7148

        @param      nchans_per_group    The number of frequency channels per multicast group

        @param      chan0_idx           The index of the first channel in the set of multicast groups

        @param      chan0_freq          The frequency in Hz of the first channel in the set of multicast groups

        @param      chan_bw             The channel bandwidth in Hz

        @param      feng_config    JSON dictionary containing general F-engine parameters.

                                        @code
                                           {
                                              'bandwidth': 856e6,
                                              'centre-frequency': 1200e6,
                                              'sideband': 'upper',
                                              'feng-antenna-map': {...},
                                              'sync-epoch': 12353524243.0,
                                              'nchans': 4096
                                           }
                                           @endcode

        @param      coherent_beam_config   A JSON object specifying the coherent beam configuration in the form:

                                           @code
                                              {
                                                'tscrunch':16,
                                                'fscrunch':1,
                                                'nbeams': 400,
                                                'antennas':'m007,m008,m009',
                                                'destination': 'spead://239.11.1.0+127:7148'
                                              }
                                           @endcode

        @param      incoherent_beam_config  A JSON object specifying the incoherent beam configuration in the form:

                                           @code
                                              {
                                                'tscrunch':16,
                                                'fscrunch':1,
                                                'antennas':'m007,m008,m009',
                                                'destination': 'spead://239.11.1.150:7148'
                                              }
                                           @endcode

        @return     katcp reply object [[[ !prepare ok | (fail [error description]) ]]]
        """
        if not self.idle:
            return ("fail", "FBF worker not in IDLE state")

        log.info("Preparing worker server instance")
        try:
            feng_config = json.loads(feng_config)
        except Exception as error:
            msg = ("Unable to parse F-eng config with "
                   "error: {}").format(str(error))
            log.error("Prepare failed: {}".format(msg))
            return ("fail", msg)
        log.info("F-eng config: {}".format(feng_config))
        try:
            coherent_beam_config = json.loads(coherent_beam_config)
        except Exception as error:
            msg = ("Unable to parse coherent beam "
                   "config with error: {}").format(str(error))
            log.error("Prepare failed: {}".format(msg))
            return ("fail", msg)
        log.info("Coherent beam config: {}".format(coherent_beam_config))
        try:
            incoherent_beam_config = json.loads(incoherent_beam_config)
        except Exception as error:
            msg = ("Unable to parse incoherent beam "
                   "config with error: {}").format(str(error))
            log.error("Prepare failed: {}".format(msg))
            return ("fail", msg)
        log.info("Incoherent beam config: {}".format(incoherent_beam_config))

        @coroutine
        def configure():
            self._state_sensor.set_value(self.PREPARING)
            log.debug("Starting delay configuration server client")
            self._delay_client = KATCPClientResource(
                dict(name="delay-configuration-client",
                     address=(dc_ip, dc_port),
                     controlled=True))
            self._delay_client.start()

            log.info("Determining F-engine capture order")
            feng_capture_order_info = determine_feng_capture_order(
                feng_config['feng-antenna-map'], coherent_beam_config,
                incoherent_beam_config)
            log.info("F-engine capture order info: {}".format(
                feng_capture_order_info))
            feng_to_antenna_map = {
                value: key
                for key, value in feng_config['feng-antenna-map'].items()
            }
            antenna_capture_order_csv = ",".join([
                feng_to_antenna_map[feng_id]
                for feng_id in feng_capture_order_info['order']
            ])
            self._antenna_capture_order_sensor.set_value(
                antenna_capture_order_csv)

            log.debug("Parsing F-engines to capture: {}".format(feng_groups))
            capture_range = ip_range_from_stream(feng_groups)
            ngroups = capture_range.count
            partition_nchans = nchans_per_group * ngroups
            worker_idx = chan0_idx / partition_nchans
            partition_bandwidth = partition_nchans * chan_bw
            self._partition_bandwidth = partition_bandwidth
            sample_clock = feng_config['bandwidth'] * 2
            timestamp_step = feng_config['nchans'] * 2 * 256
            frequency_ids = [
                chan0_idx + nchans_per_group * ii for ii in range(ngroups)
            ]
            nantennas = len(feng_capture_order_info['order'])
            heap_size = nchans_per_group * PACKET_PAYLOAD_SIZE
            heap_group_size = ngroups * heap_size * nantennas
            ngroups_data = int(MAX_DADA_BLOCK_SIZE / heap_group_size)
            ngroups_data = 2**((ngroups_data - 1).bit_length())
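            # (ngroups_data - 1).bit_length() rounds up to the next power of
            # two, e.g. 13 -> 16, while exact powers are preserved (16 -> 16)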
            centre_frequency = chan0_freq + self._partition_bandwidth / 2.0
            self._centre_frequency = centre_frequency

            # Coherent beam timestamps
            coh_heap_size = 8192
            nsamps_per_coh_heap = (
                coh_heap_size /
                (partition_nchans * coherent_beam_config['fscrunch']))
            coh_timestamp_step = (coherent_beam_config['tscrunch'] *
                                  nsamps_per_coh_heap * 2 *
                                  feng_config["nchans"])

            # Incoherent beam timestamps
            incoh_heap_size = 8192
            nsamps_per_incoh_heap = (
                incoh_heap_size /
                (partition_nchans * incoherent_beam_config['fscrunch']))
            incoh_timestamp_step = (incoherent_beam_config['tscrunch'] *
                                    nsamps_per_incoh_heap * 2 *
                                    feng_config["nchans"])

            timestamp_modulus = lcm(
                timestamp_step, lcm(incoh_timestamp_step, coh_timestamp_step))

            if self._exec_mode == FULL:
                dada_mode = 4
            else:
                dada_mode = 0
            mkrecv_config = {
                'dada_mode': dada_mode,
                'dada_key': self._dada_input_key,
                'sync_epoch': feng_config['sync-epoch'],
                'sample_clock': sample_clock,
                'mcast_sources': ",".join([str(group) for group in capture_range]),
                'mcast_port': capture_range.port,
                'interface': self._capture_interface,
                'timestamp_step': timestamp_step,
                'timestamp_modulus': timestamp_modulus,
                'ordered_feng_ids_csv': ",".join(map(str, feng_capture_order_info['order'])),
                'frequency_partition_ids_csv': ",".join(map(str, frequency_ids)),
                'ngroups_data': ngroups_data,
                'heap_size': heap_size
            }
            mkrecv_header = make_mkrecv_header(mkrecv_config,
                                               outfile=MKRECV_CONFIG_FILENAME)
            self._mkrecv_header_sensor.set_value(mkrecv_header)
            log.info(
                "Determined MKRECV configuration:\n{}".format(mkrecv_header))

            coh_ip_range = ip_range_from_stream(
                coherent_beam_config['destination'])
            nbeams = coherent_beam_config['nbeams']
            nbeams_per_group = nbeams / coh_ip_range.count
            msg = "nbeams is not a mutliple of the IP range"
            assert nbeams % coh_ip_range.count == 0, msg
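            # e.g. nbeams=384 sent to a "+127" range (128 groups) gives
            # 3 beams per multicast group (illustrative numbers)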
            """
            Note on data rates:
            For both the coherent and incoherent beams, we set the sending
            rate in MKSEND equal to 110% of the required data rate. This
            is a fudge to ensure that we send rapidly enough that MKSEND
            does not limit performance while at the same time ensuring that
            the burst rate out of the instrument is limited. This number
            may need to be tuned.
            """
            coh_data_rate = (partition_bandwidth /
                             coherent_beam_config['tscrunch'] /
                             coherent_beam_config['fscrunch'] *
                             nbeams_per_group * 1.1)
            heap_id_start = worker_idx * coh_ip_range.count
            log.debug("Determining MKSEND configuration for coherent beams")
            dada_mode = int(self._exec_mode == FULL)
            # str.lstrip() strips a character set, not a prefix, so split on
            # the scheme separator instead to recover the multicast address
            coherent_mcast_dest = coherent_beam_config['destination'].split(
                "://")[-1].split(":")[0]
            mksend_coh_config = {
                'dada_key': self._dada_coh_output_key,
                'dada_mode': dada_mode,
                'interface': self._capture_interface,
                'data_rate': coh_data_rate,
                'mcast_port': coh_ip_range.port,
                'mcast_destinations': coherent_mcast_dest,
                'sync_epoch': feng_config['sync-epoch'],
                'sample_clock': sample_clock,
                'heap_size': coh_heap_size,
                'heap_id_start': heap_id_start,
                'timestamp_step': coh_timestamp_step,
                'beam_ids': "0:{}".format(nbeams),
                'multibeam': True,
                'subband_idx': chan0_idx,
                'heap_group': nbeams_per_group
            }
            mksend_coh_header = make_mksend_header(
                mksend_coh_config, outfile=MKSEND_COHERENT_CONFIG_FILENAME)
            log.info(("Determined MKSEND configuration for coherent beams:\n{}"
                      ).format(mksend_coh_header))
            self._mksend_coh_header_sensor.set_value(mksend_coh_header)

            log.debug("Determining MKSEND configuration for incoherent beams")
            incoh_data_rate = (partition_bandwidth /
                               incoherent_beam_config['tscrunch'] /
                               incoherent_beam_config['fscrunch'] * 1.1)
            dada_mode = int(self._exec_mode == FULL)
            incoh_ip_range = ip_range_from_stream(
                incoherent_beam_config['destination'])
            incoherent_mcast_dest = incoherent_beam_config['destination'].split(
                "://")[-1].split(":")[0]
            mksend_incoh_config = {
                'dada_key': self._dada_incoh_output_key,
                'dada_mode': dada_mode,
                'interface': self._capture_interface,
                'data_rate': incoh_data_rate,
                'mcast_port': incoh_ip_range.port,
                'mcast_destinations': incoherent_mcast_dest,
                'sync_epoch': feng_config['sync-epoch'],
                'sample_clock': sample_clock,
                'heap_size': incoh_heap_size,
                'heap_id_start': worker_idx,
                'timestamp_step': incoh_timestamp_step,
                'beam_ids': 0,
                'multibeam': False,
                'subband_idx': chan0_idx,
                'heap_group': 1
            }
            mksend_incoh_header = make_mksend_header(
                mksend_incoh_config, outfile=MKSEND_INCOHERENT_CONFIG_FILENAME)
            log.info(
                "Determined MKSEND configuration for incoherent beam:\n{}".
                format(mksend_incoh_header))
            self._mksend_incoh_header_sensor.set_value(mksend_incoh_header)
            """
            Tasks:
                - compile kernels
                - create shared memory banks
            """
            # Here we create a future object for the psrdada_cpp compilation.
            # This is the longest-running setup task, so intermediate steps
            # such as DADA buffer generation can proceed while it completes.
            fbfuse_pipeline_params = {
                'total_nantennas': len(feng_capture_order_info['order']),
                'fbfuse_nchans': partition_nchans,
                'total_nchans': feng_config['nchans'],
                'coherent_tscrunch': coherent_beam_config['tscrunch'],
                'coherent_fscrunch': coherent_beam_config['fscrunch'],
                'coherent_nantennas': len(coherent_beam_config['antennas'].split(",")),
                'coherent_antenna_offset': feng_capture_order_info["coherent_span"][0],
                'coherent_nbeams': nbeams,
                'incoherent_tscrunch': incoherent_beam_config['tscrunch'],
                'incoherent_fscrunch': incoherent_beam_config['fscrunch']
            }
            psrdada_compilation_future = compile_psrdada_cpp(
                fbfuse_pipeline_params)

            log.info("Creating all DADA buffers")
            # Create capture data DADA buffer
            capture_block_size = ngroups_data * heap_group_size
            capture_block_count = int(AVAILABLE_CAPTURE_MEMORY /
                                      capture_block_size)
            log.debug("Creating dada buffer for input with key '{}'".format(
                "%s" % self._dada_input_key))
            input_make_db_future = self._make_db(self._dada_input_key,
                                                 capture_block_size,
                                                 capture_block_count)

            # Create coherent beam output DADA buffer
            coh_output_channels = (ngroups * nchans_per_group) / \
                coherent_beam_config['fscrunch']
            coh_output_samples = ngroups_data * \
                256 / coherent_beam_config['tscrunch']
            coherent_block_size = (nbeams * coh_output_channels *
                                   coh_output_samples)
            coherent_block_count = 32
            log.debug(
                ("Creating dada buffer for coherent beam output "
                 "with key '{}'").format("%s" % self._dada_coh_output_key))
            coh_output_make_db_future = self._make_db(
                self._dada_coh_output_key, coherent_block_size,
                coherent_block_count)

            # Create incoherent beam output DADA buffer
            incoh_output_channels = ((ngroups * nchans_per_group) /
                                     incoherent_beam_config['fscrunch'])
            incoh_output_samples = ((ngroups_data * 256) /
                                    incoherent_beam_config['tscrunch'])
            incoherent_block_size = incoh_output_channels * incoh_output_samples
            incoherent_block_count = 32
            log.debug(("Creating dada buffer for incoherent beam "
                       "output with key '{}'").format(
                           "%s" % self._dada_incoh_output_key))
            incoh_output_make_db_future = self._make_db(
                self._dada_incoh_output_key, incoherent_block_size,
                incoherent_block_count)

            # Need to pass the delay buffer controller the F-engine capture
            # order but only for the coherent beams
            cstart, cend = feng_capture_order_info['coherent_span']
            coherent_beam_feng_capture_order = feng_capture_order_info[
                'order'][cstart:cend]
            coherent_beam_antenna_capture_order = [
                feng_to_antenna_map[idx]
                for idx in coherent_beam_feng_capture_order
            ]

            # Start DelayBufferController instance
            # Here we are going to make the assumption that the server and processing all run in
            # one docker container that will be preallocated with the right CPU set, GPUs, memory
            # etc. This means that the configurations need to be unique by NUMA node... [Note: no
            # they don't, we can use the container IPC channel which isolates
            # the IPC namespaces.]
            #
            # Here we recreate the beam keys as they are handled by the BeamManager
            # instance in the product controller
            #
            beam_idxs = ["cfbf%05d" % (i) for i in range(nbeams)]
            self._delay_buf_ctrl = DelayBufferController(
                self._delay_client, beam_idxs,
                coherent_beam_antenna_capture_order, 1)
            yield self._delay_buf_ctrl.start()

            # By this point we require psrdada_cpp to have been compiled
            # as such we can yield on the future we created earlier
            yield psrdada_compilation_future

            # Now we can yield on dada buffer generation
            yield input_make_db_future
            yield coh_output_make_db_future
            yield incoh_output_make_db_future
            self._state_sensor.set_value(self.READY)
            log.info("Prepare request successful")
            req.reply("ok", )

        @coroutine
        def safe_configure():
            try:
                yield configure()
            except Exception as error:
                log.exception(str(error))
                req.reply("fail", str(error))

        self.ioloop.add_callback(safe_configure)
        raise AsyncReply

    @request()
    @return_reply()
    def request_deconfigure(self, req):
        """
        @brief      Deconfigure the FBFUSE instance.

        @note       Deconfigure the FBFUSE instance. If FBFUSE uses katportalclient to get information
                    from CAM, then it should disconnect at this time.

        @param      req               A katcp request object

        @return     katcp reply object [[[ !deconfigure ok | (fail [error description]) ]]]
        """

        # Need to make sure everything is stopped
        # Call self.stop?

        # Need to delete all allocated DADA buffers:
        log.info("Received deconfigure request")

        @coroutine
        def deconfigure():
            log.info("Destroying allocated DADA buffers")
            try:
                yield self._destroy_db(self._dada_input_key)
                yield self._destroy_db(self._dada_coh_output_key)
                yield self._destroy_db(self._dada_incoh_output_key)
            except Exception as error:
                log.warning("Error while destroying DADA buffers: {}".format(
                    str(error)))
            log.info("Destroying delay buffers")
            self._delay_buf_ctrl = None
            self._state_sensor.set_value(self.IDLE)
            log.info("Deconfigure request successful")
            req.reply("ok", )

        self.ioloop.add_callback(deconfigure)
        raise AsyncReply

    @request()
    @return_reply()
    def request_capture_start(self, req):
        """
        @brief      Prepare FBFUSE ingest process for data capture.

        @note       A successful return value indicates that FBFUSE is ready for data capture and
                    has sufficient resources available. An error will indicate that FBFUSE is not
                    in a position to accept data

        @param      req               A katcp request object


        @return     katcp reply object [[[ !capture-start ok | (fail [error description]) ]]]
        """
        log.info("Received capture-start request")
        try:
            self.capture_start()
        except Exception as error:
            log.exception("Error during capture start")
            return ("fail", str(error))
        else:
            log.info("Capture-start successful")
            return ("ok", )

    def capture_start(self):
        if not self.ready:
            raise Exception("FBF worker not in READY state")
        self._state_sensor.set_value(self.STARTING)
        # Create SPEAD transmitter for coherent beams

        if self._numa == 0:
            mksend_cpu_set = "7"
            psrdada_cpp_cpu_set = "6"
            mkrecv_cpu_set = "0-5"
        else:
            mksend_cpu_set = "14"
            psrdada_cpp_cpu_set = "15"
            mkrecv_cpu_set = "8-13"

        self._mksend_coh_proc = ManagedProcess([
            "taskset", "-c", mksend_cpu_set, "mksend", "--header",
            MKSEND_COHERENT_CONFIG_FILENAME, "--quiet"
        ])

        self._mksend_incoh_proc = ManagedProcess([
            "taskset", "-c", mksend_cpu_set, "mksend", "--header",
            MKSEND_INCOHERENT_CONFIG_FILENAME, "--quiet"
        ])

        # Start beamforming pipeline
        log.info("Starting PSRDADA_CPP beamforming pipeline")
        delay_buffer_key = self._delay_buf_ctrl.shared_buffer_key
        # Start beamformer instance
        psrdada_cpp_cmdline = [
            "taskset", "-c", psrdada_cpp_cpu_set, "fbfuse", "--input_key",
            self._dada_input_key, "--cb_key", self._dada_coh_output_key,
            "--ib_key", self._dada_incoh_output_key, "--delay_key_root",
            delay_buffer_key, "--cfreq", self._centre_frequency, "--bandwidth",
            self._partition_bandwidth, "--input_level", self._input_level,
            "--output_level", self._output_level, "--log_level", "info"
        ]
        self._psrdada_cpp_args_sensor.set_value(" ".join(
            map(str, psrdada_cpp_cmdline)))
        log.debug(" ".join(map(str, psrdada_cpp_cmdline)))
        self._psrdada_cpp_proc = ManagedProcess(psrdada_cpp_cmdline)

        def update_heap_loss_sensor(curr, total, avg, window):
            self._mkrecv_heap_loss.set_value(100.0 - avg)

        # Create SPEAD receiver for incoming antenna voltages
        self._mkrecv_proc = ManagedProcess(
            [
                "taskset", "-c", mkrecv_cpu_set, "mkrecv_nt", "--header",
                MKRECV_CONFIG_FILENAME, "--quiet"
            ],
            stdout_handler=MkrecvStdoutHandler(
                callback=update_heap_loss_sensor))

        def exit_check_callback():
            # Only trigger a capture stop (and stop this monitor) if one of
            # the pipeline processes has actually died
            fail = False
            if not self._mkrecv_proc.is_alive():
                log.error("mkrecv_nt exited unexpectedly")
                fail = True
            if not self._psrdada_cpp_proc.is_alive():
                log.error("fbfuse pipeline exited unexpectedly")
                fail = True
            if not self._mksend_coh_proc.is_alive():
                log.error("mksend coherent exited unexpectedly")
                fail = True
            if not self._mksend_incoh_proc.is_alive():
                log.error("mksend incoherent exited unexpectedly")
                fail = True
            if fail:
                self.ioloop.add_callback(self.capture_stop)
                self._capture_monitor.stop()

        self._capture_monitor = PeriodicCallback(exit_check_callback, 1000)
        self._capture_monitor.start()

        def dada_callback(params):
            self._ingress_buffer_percentage.set_value(params["fraction-full"])

        # start DB monitors
        self._ingress_buffer_monitor = DbMonitor(self._dada_input_key,
                                                 callback=dada_callback)
        self._ingress_buffer_monitor.start()
        self._cb_egress_buffer_monitor = DbMonitor(
            self._dada_coh_output_key,
            callback=lambda params: self._cb_egress_buffer_percentage.
            set_value(params["fraction-full"]))
        self._cb_egress_buffer_monitor.start()
        self._ib_egress_buffer_monitor = DbMonitor(
            self._dada_incoh_output_key,
            callback=lambda params: self._ib_egress_buffer_percentage.
            set_value(params["fraction-full"]))
        self._ib_egress_buffer_monitor.start()
        self._state_sensor.set_value(self.CAPTURING)

    @request()
    @return_reply()
    def request_capture_stop(self, req):
        """
        @brief      Terminate the FBFUSE ingest process for the particular FBFUSE instance

        @note       This writes out any remaining metadata, closes all files, terminates any remaining processes and
                    frees resources for the next data capture.

        @param      req               A katcp request object

        @return     katcp reply object [[[ !capture-stop ok | (fail [error description]) ]]]
        """
        log.info("Received capture-stop request")

        @coroutine
        def capture_stop_wrapper():
            try:
                yield self.capture_stop()
            except Exception as error:
                log.exception("Capture-stop request failed")
                req.reply("fail", str(error))
            else:
                log.info("Capture-stop request successful")
                req.reply("ok", )

        self.ioloop.add_callback(capture_stop_wrapper)
        raise AsyncReply

    @coroutine
    def capture_stop(self):
        if not self.capturing and not self.error:
            return
        log.info("Stopping capture")
        self._state_sensor.set_value(self.STOPPING)
        self._capture_monitor.stop()
        self._ingress_buffer_monitor.stop()
        self._cb_egress_buffer_monitor.stop()
        self._ib_egress_buffer_monitor.stop()
        log.info("Stopping MKRECV instance")
        self._mkrecv_proc.terminate()
        log.info("Stopping PSRDADA_CPP instance")
        self._psrdada_cpp_proc.terminate()
        log.info("Stopping MKSEND instances")
        self._mksend_incoh_proc.terminate()
        self._mksend_coh_proc.terminate()
        log.info("Resetting DADA buffers")
        reset_tasks = []
        reset_tasks.append(self._reset_db(self._dada_input_key, timeout=7.0))
        reset_tasks.append(
            self._reset_db(self._dada_coh_output_key, timeout=4.0))
        reset_tasks.append(
            self._reset_db(self._dada_incoh_output_key, timeout=5.0))
        for task in reset_tasks:
            try:
                yield task
            except Exception as error:
                log.warning("Error raised on DB reset: {}".format(str(error)))
        self._state_sensor.set_value(self.READY)
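
The timestamp bookkeeping in request_prepare above reduces to a
least-common-multiple calculation over the F-engine and beam heap steps. A
minimal sketch with illustrative numbers follows (the lcm helper is an
assumption here; the real code base imports its own):

    from fractions import gcd  # Python 2 location of gcd

    def lcm(a, b):
        return a * b // gcd(a, b)

    total_nchans = 4096        # F-engine channel count (illustrative)
    partition_nchans = 64      # channels handled by this worker
    tscrunch, fscrunch = 16, 1
    coh_heap_size = 8192

    # F-engine heap step and coherent-beam heap step in ADC samples
    timestamp_step = total_nchans * 2 * 256
    nsamps_per_coh_heap = coh_heap_size // (partition_nchans * fscrunch)
    coh_timestamp_step = tscrunch * nsamps_per_coh_heap * 2 * total_nchans
    # capture must start on a timestamp divisible by both steps
    timestamp_modulus = lcm(timestamp_step, coh_timestamp_step)
    print timestamp_step, coh_timestamp_step, timestamp_modulus
    # -> 2097152 16777216 16777216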
Example No. 20
0
    def setup(self, subarray_size, antennas_csv, nbeams, tot_nchans,
              feng_groups, chan0_idx, worker_idx):
        cbc_antennas_names = parse_csv_antennas(antennas_csv)
        cbc_antennas = [Antenna(ANTENNAS[name]) for name in cbc_antennas_names]
        self._beam_manager = BeamManager(nbeams, cbc_antennas)
        self._delay_config_server = DelayConfigurationServer(
            "127.0.0.1", 0, self._beam_manager)
        self._delay_config_server.start()
        antennas_json = self._delay_config_server._antennas_sensor.value()
        antennas = json.loads(antennas_json)
        coherent_beams = ["cfbf{:05d}".format(ii) for ii in range(nbeams)]
        coherent_beams_csv = ",".join(coherent_beams)
        feng_antenna_map = {antenna: ii for ii, antenna in enumerate(antennas)}
        coherent_beam_antennas = antennas
        incoherent_beam_antennas = antennas
        nantennas = len(antennas)
        nchans_per_group = tot_nchans / subarray_size / 4
        nchans = ip_range_from_stream(feng_groups).count * nchans_per_group

        chan0_freq = 1240e6
        chan_bw = 856e6 / tot_nchans

        mcast_to_beam_map = {"spead://239.11.1.150:7148": "ifbf00001"}
        for ii in range(8):
            mcast_to_beam_map["spead://239.11.1.{}:7148".format(
                ii)] = ",".join(coherent_beams[4 * ii:4 * (ii + 1)])

        feng_config = {
            "bandwidth": 856e6,
            "centre-frequency": 1200e6,
            "sideband": "upper",
            "feng-antenna-map": feng_antenna_map,
            "sync-epoch": 1554907897.0,
            "nchans": tot_nchans
        }
        coherent_beam_config = {
            "tscrunch": 16,
            "fscrunch": 1,
            "antennas": ",".join(coherent_beam_antennas)
        }
        incoherent_beam_config = {
            "tscrunch": 16,
            "fscrunch": 1,
            "antennas": ",".join(incoherent_beam_antennas)
        }

        worker_client = KATCPClientResource(
            dict(name="worker-server-client",
                 address=self._worker_server.bind_address,
                 controlled=True))
        yield worker_client.start()
        yield worker_client.until_synced()

        print "preparing"
        response = yield worker_client.req.prepare(
            feng_groups,
            nchans_per_group,
            chan0_idx,
            chan0_freq,
            chan_bw,
            nbeams,
            json.dumps(mcast_to_beam_map),
            json.dumps(feng_config),
            json.dumps(coherent_beam_config),
            json.dumps(incoherent_beam_config),
            *self._delay_config_server.bind_address,
            timeout=300.0)
        if not response.reply.reply_ok():
            raise Exception("Error on prepare: {}".format(
                response.reply.arguments))
        else:
            print "prepare done"

        yield worker_client.req.capture_start()
Example No. 21
0
class FbfWorkerServer(AsyncDeviceServer):
    VERSION_INFO = ("fbf-control-server-api", 0, 1)
    BUILD_INFO = ("fbf-control-server-implementation", 0, 1, "rc1")
    DEVICE_STATUSES = ["ok", "degraded", "fail"]
    STATES = ["idle", "preparing", "ready", "starting", "capturing", "stopping", "error"]
    IDLE, PREPARING, READY, STARTING, CAPTURING, STOPPING, ERROR = STATES

    def __init__(self, ip, port, dummy=False):
        """
        @brief       Construct new FbfWorkerServer instance

        @param  ip     The interface address on which the server should listen
        @param  port   The port that the server should bind to
        @param  dummy  If True, run in dummy mode where system calls are
                       logged but not executed

        """
        self._dc_ip = None
        self._dc_port = None
        self._delay_client = None
        self._delays = None
        self._dummy = dummy
        self._dada_input_key = 0xdada
        self._dada_coh_output_key = 0xcaca
        self._dada_incoh_output_key = 0xbaba
        super(FbfWorkerServer, self).__init__(ip, port)

    @coroutine
    def start(self):
        """Start FbfWorkerServer server"""
        super(FbfWorkerServer, self).start()

    @coroutine
    def stop(self):
        yield self.deregister()
        yield super(FbfWorkerServer, self).stop()

    def setup_sensors(self):
        """
        @brief    Set up monitoring sensors.

        Sensor list:
        - device-status
        - local-time-synced
        - fbf0-status
        - fbf1-status

        @note     The following sensors are made available on top of default sensors
                  implemented in AsyncDeviceServer and its base classes.

                  device-status:      Reports the health status of the FBFUSE and associated devices:
                                      Among other things report HW failure, SW failure and observation failure.
        """
        self._device_status_sensor = Sensor.discrete(
            "device-status",
            description = "Health status of FbfWorkerServer instance",
            params = self.DEVICE_STATUSES,
            default = "ok",
            initial_status = Sensor.NOMINAL)
        self.add_sensor(self._device_status_sensor)

        self._state_sensor = LoggingSensor.discrete(
            "state",
            params = self.STATES,
            description = "The current state of this worker instance",
            default = self.IDLE,
            initial_status = Sensor.NOMINAL)
        self._state_sensor.set_logger(log)
        self.add_sensor(self._state_sensor)

        self._delay_client_sensor = Sensor.string(
            "delay-engine-server",
            description = "The address of the currently set delay engine",
            default = "",
            initial_status = Sensor.UNKNOWN)
        self.add_sensor(self._delay_client_sensor)

        self._antenna_capture_order_sensor = Sensor.string(
            "antenna-capture-order",
            description = "The order in which the worker will capture antennas internally",
            default = "",
            initial_status = Sensor.UNKNOWN)
        self.add_sensor(self._antenna_capture_order_sensor)

        self._mkrecv_header_sensor = Sensor.string(
            "mkrecv-header",
            description = "The MKRECV/DADA header used for configuring capture with MKRECV",
            default = "",
            initial_status = Sensor.UNKNOWN)
        self.add_sensor(self._mkrecv_header_sensor)

    @property
    def capturing(self):
        return self.state == self.CAPTURING

    @property
    def idle(self):
        return self.state == self.IDLE

    @property
    def starting(self):
        return self.state == self.STARTING

    @property
    def stopping(self):
        return self.state == self.STOPPING

    @property
    def ready(self):
        return self.state == self.READY

    @property
    def preparing(self):
        return self.state == self.PREPARING

    @property
    def error(self):
        return self.state == self.ERROR

    @property
    def state(self):
        return self._state_sensor.value()

    def _system_call_wrapper(self, cmd):
        log.debug("System call: '{}'".format(" ".join(cmd)))
        if self._dummy:
            log.debug("Server is running in dummy mode, system call will be ignored")
        else:
            check_call(cmd)

    def _determine_feng_capture_order(self, antenna_to_feng_id_map, coherent_beam_config, incoherent_beam_config):
        # Need to sort the f-engine IDs into 4 states
        # 1. Incoherent but not coherent
        # 2. Incoherent and coherent
        # 3. Coherent but not incoherent
        # 4. Neither coherent nor incoherent
        #
        # We must catch all antennas as even in case 4 the data is required for the
        # transient buffer.
        #
        # To make this split, we first create the three sets, coherent, incoherent and all.
        mapping = antenna_to_feng_id_map
        all_feng_ids = set(mapping.values())
        coherent_feng_ids = set(mapping[antenna] for antenna in parse_csv_antennas(coherent_beam_config['antennas']))
        incoherent_feng_ids = set(mapping[antenna] for antenna in parse_csv_antennas(incoherent_beam_config['antennas']))
        incoh_not_coh = incoherent_feng_ids.difference(coherent_feng_ids)
        incoh_and_coh = incoherent_feng_ids.intersection(coherent_feng_ids)
        coh_not_incoh = coherent_feng_ids.difference(incoherent_feng_ids)
        used_fengs = incoh_not_coh.union(incoh_and_coh).union(coh_not_incoh)
        unused_fengs = all_feng_ids.difference(used_fengs)
        # Output final order
        final_order = list(incoh_not_coh) + list(incoh_and_coh) + list(coh_not_incoh) + list(unused_fengs)
        start_of_incoherent_fengs = 0
        end_of_incoherent_fengs = len(incoh_not_coh) + len(incoh_and_coh)
        start_of_coherent_fengs = len(incoh_not_coh)
        end_of_coherent_fengs = len(incoh_not_coh) + len(incoh_and_coh) + len(coh_not_incoh)
        start_of_unused_fengs = end_of_coherent_fengs
        end_of_unused_fengs = len(all_feng_ids)
        info = {
            "order": final_order,
            "incoherent_span":(start_of_incoherent_fengs, end_of_incoherent_fengs),
            "coherent_span":(start_of_coherent_fengs, end_of_coherent_fengs),
            "unused_span":(start_of_unused_fengs, end_of_unused_fengs)
        }
        return info
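
    # Illustration of the ordering logic above, with hypothetical antenna
    # names and F-engine IDs (parse_csv_antennas is assumed to split a
    # comma-separated antenna list):
    #   mapping    = {"m007": 0, "m008": 1, "m009": 2, "m010": 3}
    #   coherent antennas   "m008,m009" -> feng IDs {1, 2}
    #   incoherent antennas "m007,m008" -> feng IDs {0, 1}
    #   incoh-only = {0}, incoh-and-coh = {1}, coh-only = {2}, unused = {3}
    #   => order = [0, 1, 2, 3], incoherent_span = (0, 2),
    #      coherent_span = (1, 3), unused_span = (3, 4)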

    @request(Str(), Int(), Int(), Float(), Float(), Str(), Str(), Str(), Str(), Str(), Int())
    @return_reply()
    def request_prepare(self, req, feng_groups, nchans_per_group, chan0_idx, chan0_freq,
                        chan_bw, mcast_to_beam_map, feng_config, coherent_beam_config,
                        incoherent_beam_config, dc_ip, dc_port):
        """
        @brief      Prepare FBFUSE to receive and process data from a subarray

        @detail     REQUEST ?prepare feng_groups, nchans_per_group, chan0_idx, chan0_freq,
                        chan_bw, mcast_to_beam_map, feng_config, coherent_beam_config,
                        incoherent_beam_config, dc_ip, dc_port
                    Prepare FBFUSE to receive and process the given data products

        @param      req                 A katcp request object

        @param      feng_groups         The contiguous range of multicast groups to capture F-engine data from,
                                        the parameter is formatted in stream notation, e.g.: spead://239.11.1.150+3:7147

        @param      nchans_per_group    The number of frequency channels per multicast group

        @param      chan0_idx           The index of the first channel in the set of multicast groups

        @param      chan0_freq          The frequency in Hz of the first channel in the set of multicast groups

        @param      chan_bw             The channel bandwidth in Hz

        @param      mcast_to_beam_map   A JSON mapping between output multicast addresses and beam IDs. This is the sole
                                        authority for the number of beams that will be produced and their indexes. The map
                                        is in the form:

                                        @code
                                           {
                                              "spead://239.11.2.150:7147":"cfbf00001,cfbf00002,cfbf00003,cfbf00004",
                                              "spead://239.11.2.151:7147":"ifbf00001"
                                           }
                                        @endcode

        @param      feng_config    JSON dictionary containing general F-engine parameters.

                                        @code
                                           {
                                              'bandwidth': 856e6,
                                              'centre-frequency': 1200e6,
                                              'sideband': 'upper',
                                              'feng-antenna-map': {...},
                                              'sync-epoch': 12353524243.0,
                                              'nchans': 4096
                                           }
                                        @endcode

        @param      coherent_beam_config   A JSON object specifying the coherent beam configuration in the form:

                                           @code
                                              {
                                                'tscrunch':16,
                                                'fscrunch':1,
                                                'antennas':'m007,m008,m009'
                                              }
                                           @endcode

        @param      incoherent_beam_config  A JSON object specifying the incoherent beam configuration in the form:

                                           @code
                                              {
                                                'tscrunch':16,
                                                'fscrunch':1,
                                                'antennas':'m007,m008,m009'
                                              }
                                           @endcode

        @return     katcp reply object [[[ !prepare ok | (fail [error description]) ]]]
        """
        if not self.idle:
            return ("fail", "FBF worker not in IDLE state")

        log.info("Preparing worker server instance")
        try:
            feng_config = json.loads(feng_config)
        except Exception as error:
            return ("fail", "Unable to parse F-eng config with error: {}".format(str(error)))
        try:
            mcast_to_beam_map = json.loads(mcast_to_beam_map)
        except Exception as error:
            return ("fail", "Unable to parse multicast beam mapping with error: {}".format(str(error)))
        try:
            coherent_beam_config = json.loads(coherent_beam_config)
        except Exception as error:
            return ("fail", "Unable to parse coherent beam config with error: {}".format(str(error)))
        try:
            incoherent_beam_config = json.loads(incoherent_beam_config)
        except Exception as error:
            return ("fail", "Unable to parse incoherent beam config with error: {}".format(str(error)))

        @coroutine
        def configure():
            self._state_sensor.set_value(self.PREPARING)
            log.debug("Starting delay configuration server client")
            self._delay_client = KATCPClientResource(dict(
                name="delay-configuration-client",
                address=(dc_ip, dc_port),
                controlled=True))
            self._delay_client.start()

            log.debug("Determining F-engine capture order")
            feng_capture_order_info = self._determine_feng_capture_order(feng_config['feng-antenna-map'], coherent_beam_config,
                incoherent_beam_config)
            log.debug("Capture order info: {}".format(feng_capture_order_info))
            feng_to_antenna_map = {value:key for key,value in feng_config['feng-antenna-map'].items()}
            antenna_capture_order_csv = ",".join([feng_to_antenna_map[feng_id] for feng_id in feng_capture_order_info['order']])
            self._antenna_capture_order_sensor.set_value(antenna_capture_order_csv)

            log.debug("Parsing F-engines to capture: {}".format(feng_groups))
            capture_range = ip_range_from_stream(feng_groups)
            ngroups = capture_range.count
            partition_nchans = nchans_per_group * ngroups
            partition_bandwidth = partition_nchans * chan_bw
            npol = 2
            ndim = 2
            nbits = 8
            tsamp = 1.0 / (feng_config['bandwidth'] / feng_config['nchans'])
            sample_clock = feng_config['bandwidth'] * 2
            timestamp_step = feng_config['nchans'] * 2 * 256  # WARNING: This is only valid in 4k mode
            frequency_ids = [chan0_idx + nchans_per_group * ii for ii in range(ngroups)]  # WARNING: Assumes contiguous groups
            mkrecv_config = {
                'frequency_mhz': (chan0_freq + feng_config['nchans']/2.0 * chan_bw) / 1e6,
                'bandwidth': partition_bandwidth,
                'tsamp_us': tsamp * 1e6,
                'bytes_per_second': partition_bandwidth * npol * ndim * nbits / 8,  # nbits is in bits, hence the /8
                'nchan': partition_nchans,
                'dada_key': self._dada_input_key,
                'nantennas': len(feng_capture_order_info['order']),
                'antennas_csv': antenna_capture_order_csv,
                'sync_epoch': feng_config['sync-epoch'],
                'sample_clock': sample_clock,
                'mcast_sources': ",".join([str(group) for group in capture_range]),
                'mcast_port': capture_range.port,
                'interface': "192.168.0.1",  # FIXME: capture interface should not be hard-coded
                'timestamp_step': timestamp_step,
                'ordered_feng_ids_csv': ",".join(map(str, feng_capture_order_info['order'])),
                'frequency_partition_ids_csv': ",".join(map(str,frequency_ids))
            }
            mkrecv_header = make_mkrecv_header(mkrecv_config)
            self._mkrecv_header_sensor.set_value(mkrecv_header)
            log.info("Determined MKRECV configuration:\n{}".format(mkrecv_header))


            log.debug("Parsing beam to multicast mapping")
            incoherent_beam = None
            incoherent_beam_group = None
            coherent_beam_to_group_map = {}
            for group, beams in mcast_to_beam_map.items():
                for beam in beams.split(","):
                    if beam.startswith("cfbf"):
                        coherent_beam_to_group_map[beam] = group
                    if beam.startswith("ifbf"):
                        incoherent_beam = beam
                        incoherent_beam_group = group

            log.debug("Determined coherent beam to multicast mapping: {}".format(coherent_beam_to_group_map))
            if incoherent_beam:
                log.debug("Incoherent beam will be sent to: {}".format(incoherent_beam_group))
            else:
                log.debug("No incoherent beam specified")


            """
            Tasks:
                - compile kernels
                - create shared memory banks
            """
            # Compile beamformer
            # TBD

            # Need to come up with a good way to allocate keys for dada buffers

            # Create input DADA buffer
            log.debug("Creating dada buffer for input with key '{}'".format("%x"%self._dada_input_key))
            #self._system_call_wrapper(["dada_db","-k",self._dada_input_key,"-n","64","-l","-p"])

            # Create coherent beam output DADA buffer
            log.debug("Creating dada buffer for coherent beam output with key '{}'".format("%x"%self._dada_coh_output_key))
            #self._system_call_wrapper(["dada_db","-k",self._dada_coh_output_key,"-n","64","-l","-p"])

            # Create incoherent beam output DADA buffer
            log.debug("Creating dada buffer for incoherent beam output with key '{}'".format("%x"%self._dada_incoh_output_key))
            #self._system_call_wrapper(["dada_db","-k",self._dada_incoh_output_key,"-n","64","-l","-p"])

            # Create SPEAD transmitter for coherent beams
            # Call to MKSEND

            # Create SPEAD transmitter for incoherent beam
            # Call to MKSEND

            # Need to pass the delay buffer controller the F-engine capture order but only for the coherent beams
            cstart, cend = feng_capture_order_info['coherent_span']
            coherent_beam_feng_capture_order = feng_capture_order_info['order'][cstart:cend]
            coherent_beam_antenna_capture_order = [feng_to_antenna_map[idx] for idx in coherent_beam_feng_capture_order]


            # Start DelayBufferController instance
            # Here we are going to make the assumption that the server and processing all run in
            # one docker container that will be preallocated with the right CPU set, GPUs, memory
            # etc. This means that the configurations need to be unique by NUMA node... [Note: no
            # they don't, we can use the container IPC channel which isolates the IPC namespaces.]
            if not self._dummy:
                n_coherent_beams = len(coherent_beam_to_group_map)
                coherent_beam_antennas = parse_csv_antennas(coherent_beam_config['antennas'])
                self._delay_buffer_controller = DelayBufferController(self._delay_client,
                    coherent_beam_to_group_map.keys(),
                    coherent_beam_antenna_capture_order, 1)
                yield self._delay_buffer_controller.start()
            # Start beamformer instance
            # TBD

            # Define MKRECV configuration file

            # SPEAD receiver does not get started until a capture init call
            self._state_sensor.set_value(self.READY)
            req.reply("ok",)

        self.ioloop.add_callback(configure)
        raise AsyncReply

    @request()
    @return_reply()
    def request_deconfigure(self, req):
        """
        @brief      Deconfigure the FBFUSE instance.

        @note       Deconfigure the FBFUSE instance. If FBFUSE uses katportalclient to get information
                    from CAM, then it should disconnect at this time.

        @param      req               A katcp request object

        @return     katcp reply object [[[ !deconfigure ok | (fail [error description]) ]]]
        """

        # Need to make sure everything is stopped
        # Call self.stop?

        # Need to delete all allocated DADA buffers:

        @coroutine
        def deconfigure():
            log.info("Destroying dada buffer for input with key '{}'".format(self._dada_input_key))
            self._system_call_wrapper(["dada_db","-k",self._dada_input_key,"-d"])
            log.info("Destroying dada buffer for coherent beam output with key '{}'".format(self._dada_coh_output_key))
            self._system_call_wrapper(["dada_db","-k",self._dada_coh_output_key,"-n","64","-l","-p"])
            log.info("Destroying dada buffer for incoherent beam output with key '{}'".format(self._dada_incoh_output_key))
            self._system_call_wrapper(["dada_db","-k",self._dada_coh_output_key,"-n","64","-l","-p"])
            log.info("Destroying delay buffer controller")
            del self._delay_buffer_controller
            self._delay_buffer_controller = None
            req.reply("ok",)

        self.ioloop.add_callback(deconfigure)
        raise AsyncReply

    @request()
    @return_reply()
    def request_capture_start(self, req):
        """
        @brief      Prepare FBFUSE ingest process for data capture.

        @note       A successful return value indicates that FBFUSE is ready for data capture and
                    has sufficient resources available. An error will indicate that FBFUSE is not
                    in a position to accept data

        @param      req               A katcp request object


        @return     katcp reply object [[[ !capture-init ok | (fail [error description]) ]]]
        """
        if not self.ready:
            return ("fail", "FBF worker not in READY state")
        # Here we start MKRECV running into the input dada buffer
        self._mkrecv_ingest_proc = Popen(["mkrecv","--config",self._mkrecv_config_filename], stdout=PIPE, stderr=PIPE)
        return ("ok",)

    @request()
    @return_reply()
    def request_capture_stop(self, req):
        """
        @brief      Terminate the FBFUSE ingest process for the particular FBFUSE instance

        @note       This writes out any remaining metadata, closes all files, terminates any remaining processes and
                    frees resources for the next data capture.

        @param      req               A katcp request object

        @return     katcp reply object [[[ !capture-done ok | (fail [error description]) ]]]
        """
        if not self.capturing and not self.error:
            return ("ok",)

        @coroutine
        def stop_mkrecv_capture():
            #send SIGTERM to MKRECV
            log.info("Sending SIGTERM to MKRECV process")
            self._mkrecv_ingest_proc.terminate()
            self._mkrecv_timeout = 10.0
            log.info("Waiting {} seconds for MKRECV to terminate...".format(self._mkrecv_timeout))
            now = time.time()
            while time.time()-now < self._mkrecv_timeout:
                retval = self._mkrecv_ingest_proc.poll()
                if retval is not None:
                    log.info("MKRECV returned a return value of {}".format(retval))
                    break
                else:
                    yield sleep(0.5)
            else:
                log.warning("MKRECV failed to terminate in alloted time")
                log.info("Killing MKRECV process")
                self._mkrecv_ingest_proc.kill()
            req.reply("ok",)
        self.ioloop.add_callback(stop_mkrecv_capture)
        raise AsyncReply
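
# A minimal, blocking sketch of the terminate-then-kill pattern used in
# stop_mkrecv_capture above. The helper name and defaults are hypothetical;
# the real implementation above polls cooperatively on the tornado IOLoop.
import time


def stop_gracefully(proc, timeout=10.0, poll_interval=0.5):
    """Terminate a subprocess, escalating to a kill after a timeout."""
    proc.terminate()
    deadline = time.time() + timeout
    while time.time() < deadline:
        if proc.poll() is not None:  # process exited on its own
            return proc.returncode
        time.sleep(poll_interval)
    proc.kill()  # never exited within the timeout: escalate
    return proc.wait()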
Ejemplo n.º 22
0
class KatcpSidecar(object):
    def __init__(self, host, port):
        """
        Constructs a new instance.

        :param      host:  The address of the server to sidecar
        :param      port:  The server port
        """
        log.debug("Constructing sidecar for {}:{}".format(host, port))
        self.rc = KATCPClientResource(
            dict(name="sidecar-client", address=(host, port), controlled=True))
        self._update_callbacks = set()
        self._previous_sensors = set()

    @coroutine
    def start(self):
        """
        @brief     Start the sidecar
        """
        @coroutine
        def _start():
            log.debug("Waiting on synchronisation with server")
            yield self.rc.until_synced()
            log.debug("Client synced")
            log.debug("Requesting version info")
            response = yield self.rc.req.version_list()
            log.info("response: {}".format(response))
            self.ioloop.add_callback(self.on_interface_changed)

        self.rc.start()
        self.ic = self.rc._inspecting_client
        self.ioloop = self.rc.ioloop
        self.ic.katcp_client.hook_inform(
            "interface-changed", lambda message: self.ioloop.add_callback(
                self.on_interface_changed))
        self.ioloop.add_callback(_start)

    def stop(self):
        """
        @brief      Stop the sidecar
        """
        self.rc.stop()

    @coroutine
    def on_interface_changed(self):
        """
        @brief    Synchronise with the sidecar'd server's new sensors
        """
        log.debug("Waiting on synchronisation with server")
        yield self.rc.until_synced()
        log.debug("Client synced")
        current_sensors = set(self.rc.sensor.keys())
        log.debug("Current sensor set: {}".format(current_sensors))
        removed = self._previous_sensors.difference(current_sensors)
        log.debug("Sensors removed since last update: {}".format(removed))
        added = current_sensors.difference(self._previous_sensors)
        log.debug("Sensors added since last update: {}".format(added))
        for name in list(added):
            log.debug("Setting sampling strategy and callbacks on sensor '{}'".
                      format(name))
            self.rc.set_sampling_strategy(name, "auto")
            self.rc.set_sensor_listener(name, self.on_sensor_update)
        self._previous_sensors = current_sensors

    @coroutine
    def on_sensor_update(self, sensor, reading):
        """
        @brief      Callback to be executed on a sensor being updated

        @param      sensor   A KATCP Sensor Object
        @param      reading  The sensor reading
        """
        log.debug("Recieved sensor update for sensor '{}': {}".format(
            sensor.name, repr(reading)))
        for callback in list(self._update_callbacks):
            try:
                callback(sensor, reading)
            except Exception as error:
                log.exception(
                    "Failed to call update callback {} with error: {}".format(
                        callback, str(error)))

    def add_sensor_update_callback(self, callback):
        """
        @brief    Add a sensor update callback.

        @param      callback:  The callback

        @note     The callback must have a call signature of
                  func(sensor, reading)
        """
        self._update_callbacks.add(callback)

    def remove_sensor_update_callback(self, callback):
        """
        @brief    Remove a sensor update callback.

        @param      callback:  The callback
        """
        self._update_callbacks.remove(callback)
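
# Hypothetical usage sketch for KatcpSidecar: mirror every sensor update from
# a KATCP server (the address and port below are illustrative) to stdout.
from tornado.ioloop import IOLoop


def print_update(sensor, reading):
    print("{}: {}".format(sensor.name, reading))


sidecar = KatcpSidecar("127.0.0.1", 5000)
sidecar.add_sensor_update_callback(print_update)
ioloop = IOLoop.current()
ioloop.add_callback(sidecar.start)
ioloop.start()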
Ejemplo n.º 23
0
class WorkerWrapper(object):
    """Wrapper around a client to an FbfWorkerServer
    instance.
    """
    def __init__(self, hostname, port):
        """
        @brief  Create a new wrapper around a client to a worker server

        @params hostname The hostname for the worker server
        @params port     The port number that the worker server serves on
        """
        log.debug("Creating worker client to worker at {}:{}".format(
            hostname, port))
        self._client = KATCPClientResource(
            dict(name="worker-server-client",
                 address=(hostname, port),
                 controlled=True))
        self.hostname = hostname
        self.port = port
        self.priority = 0  # Currently no priority mechanism is implemented
        self._started = False

    @coroutine
    def get_sensor_value(self, sensor_name):
        """
        @brief  Retrieve a sensor value from the worker
        """
        yield self._client.until_synced()
        response = yield self._client.req.sensor_value(sensor_name)
        if not response.reply.reply_ok():
            raise WorkerRequestError(response.reply.arguments[1])
        raise Return(response.informs[0].arguments[-1])

    def start(self):
        """
        @brief  Start the client to the worker server
        """
        log.debug("Starting client to worker at {}:{}".format(
            self.hostname, self.port))
        self._client.start()
        self._started = True

    @coroutine
    def reset(self):
        yield self._client.until_synced()
        response = yield self._client.req.reset()
        if not response.reply.reply_ok():
            raise WorkerRequestError(response.reply.arguments[1])

    def is_connected(self):
        return self._client.is_connected()

    def __repr__(self):
        return "<{} @ {}:{} (connected = {})>".format(self.__class__.__name__,
                                                      self.hostname, self.port,
                                                      self.is_connected())

    def __hash__(self):
        # This hash override is required to allow these wrappers
        # to be used with set() objects. The implication is that
        # the combination of hostname and port is unique for a
        # worker server.
        return hash((self.hostname, self.port))

    def __eq__(self, other):
        # Also implemented to help with hashing
        # for sets
        return self.__hash__() == hash(other)

    def __del__(self):
        if self._started:
            try:
                self._client.stop()
            except Exception as error:
                log.exception(str(error))
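
# Sketch showing why WorkerWrapper overrides __hash__ and __eq__: two wrappers
# pointing at the same hostname:port collapse to a single entry in a set. The
# hostnames below are hypothetical.
workers = set()
workers.add(WorkerWrapper("fbf-worker-0", 5100))
workers.add(WorkerWrapper("fbf-worker-0", 5100))  # duplicate, ignored
workers.add(WorkerWrapper("fbf-worker-1", 5100))
assert len(workers) == 2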
Ejemplo n.º 24
0
        def configure():
            self._state_sensor.set_value(self.PREPARING)
            log.debug("Starting delay configuration server client")
            self._delay_client = KATCPClientResource(
                dict(name="delay-configuration-client",
                     address=(dc_ip, dc_port),
                     controlled=True))
            self._delay_client.start()

            log.info("Determining F-engine capture order")
            feng_capture_order_info = determine_feng_capture_order(
                feng_config['feng-antenna-map'], coherent_beam_config,
                incoherent_beam_config)
            log.info("F-engine capture order info: {}".format(
                feng_capture_order_info))
            feng_to_antenna_map = {
                value: key
                for key, value in feng_config['feng-antenna-map'].items()
            }
            antenna_capture_order_csv = ",".join([
                feng_to_antenna_map[feng_id]
                for feng_id in feng_capture_order_info['order']
            ])
            self._antenna_capture_order_sensor.set_value(
                antenna_capture_order_csv)

            log.debug("Parsing F-engines to capture: {}".format(feng_groups))
            capture_range = ip_range_from_stream(feng_groups)
            ngroups = capture_range.count
            partition_nchans = nchans_per_group * ngroups
            worker_idx = chan0_idx / partition_nchans
            partition_bandwidth = partition_nchans * chan_bw
            self._partition_bandwidth = partition_bandwidth
            sample_clock = feng_config['bandwidth'] * 2
            timestamp_step = feng_config['nchans'] * 2 * 256
            frequency_ids = [
                chan0_idx + nchans_per_group * ii for ii in range(ngroups)
            ]
            nantennas = len(feng_capture_order_info['order'])
            heap_size = nchans_per_group * PACKET_PAYLOAD_SIZE
            heap_group_size = ngroups * heap_size * nantennas
            ngroups_data = int(MAX_DADA_BLOCK_SIZE / heap_group_size)
            # Round up to the nearest power of two
            ngroups_data = 2**((ngroups_data - 1).bit_length())
            centre_frequency = chan0_freq + self._partition_bandwidth / 2.0
            self._centre_frequency = centre_frequency

            # Coherent beam timestamps
            coh_heap_size = 8192
            nsamps_per_coh_heap = (
                coh_heap_size /
                (partition_nchans * coherent_beam_config['fscrunch']))
            coh_timestamp_step = (coherent_beam_config['tscrunch'] *
                                  nsamps_per_coh_heap * 2 *
                                  feng_config["nchans"])

            # Incoherent beam timestamps
            incoh_heap_size = 8192
            nsamps_per_incoh_heap = (
                incoh_heap_size /
                (partition_nchans * incoherent_beam_config['fscrunch']))
            incoh_timestamp_step = (incoherent_beam_config['tscrunch'] *
                                    nsamps_per_incoh_heap * 2 *
                                    feng_config["nchans"])

            # The timestamp modulus is the least common multiple of the input
            # and output timestamp steps, so that all streams align on a
            # common boundary
            timestamp_modulus = lcm(
                timestamp_step, lcm(incoh_timestamp_step, coh_timestamp_step))

            if self._exec_mode == FULL:
                dada_mode = 4
            else:
                dada_mode = 0
            mkrecv_config = {
                'dada_mode': dada_mode,
                'dada_key': self._dada_input_key,
                'sync_epoch': feng_config['sync-epoch'],
                'sample_clock': sample_clock,
                'mcast_sources': ",".join([str(group) for group in capture_range]),
                'mcast_port': capture_range.port,
                'interface': self._capture_interface,
                'timestamp_step': timestamp_step,
                'timestamp_modulus': timestamp_modulus,
                'ordered_feng_ids_csv': ",".join(map(str, feng_capture_order_info['order'])),
                'frequency_partition_ids_csv': ",".join(map(str, frequency_ids)),
                'ngroups_data': ngroups_data,
                'heap_size': heap_size
            }
            mkrecv_header = make_mkrecv_header(mkrecv_config,
                                               outfile=MKRECV_CONFIG_FILENAME)
            self._mkrecv_header_sensor.set_value(mkrecv_header)
            log.info(
                "Determined MKRECV configuration:\n{}".format(mkrecv_header))

            coh_ip_range = ip_range_from_stream(
                coherent_beam_config['destination'])
            nbeams = coherent_beam_config['nbeams']
            msg = "nbeams is not a multiple of the IP range"
            assert nbeams % coh_ip_range.count == 0, msg
            nbeams_per_group = nbeams / coh_ip_range.count
            """
            Note on data rates:
            For both the coherent and incoherent beams, we set the sending
            rate in MKSEND equal to 110% of the required data rate. This
            is a fudge to ensure that we send rapidly enough that MKSEND
            does not limit performance while at the same time ensuring that
            the burst rate out of the instrument is limited. This number
            may need to be tuned.
            """
            coh_data_rate = (partition_bandwidth /
                             coherent_beam_config['tscrunch'] /
                             coherent_beam_config['fscrunch'] *
                             nbeams_per_group * 1.1)
            heap_id_start = worker_idx * coh_ip_range.count
            log.debug("Determining MKSEND configuration for coherent beams")
            dada_mode = int(self._exec_mode == FULL)
            coherent_mcast_dest = coherent_beam_config['destination'].split(
                "//")[-1].split(":")[0]
            mksend_coh_config = {
                'dada_key': self._dada_coh_output_key,
                'dada_mode': dada_mode,
                'interface': self._capture_interface,
                'data_rate': coh_data_rate,
                'mcast_port': coh_ip_range.port,
                'mcast_destinations': coherent_mcast_dest,
                'sync_epoch': feng_config['sync-epoch'],
                'sample_clock': sample_clock,
                'heap_size': coh_heap_size,
                'heap_id_start': heap_id_start,
                'timestamp_step': coh_timestamp_step,
                'beam_ids': "0:{}".format(nbeams),
                'multibeam': True,
                'subband_idx': chan0_idx,
                'heap_group': nbeams_per_group
            }
            mksend_coh_header = make_mksend_header(
                mksend_coh_config, outfile=MKSEND_COHERENT_CONFIG_FILENAME)
            log.info(("Determined MKSEND configuration for coherent beams:\n{}"
                      ).format(mksend_coh_header))
            self._mksend_coh_header_sensor.set_value(mksend_coh_header)

            log.debug("Determining MKSEND configuration for incoherent beams")
            incoh_data_rate = (partition_bandwidth /
                               incoherent_beam_config['tscrunch'] /
                               incoherent_beam_config['fscrunch'] * 1.1)
            dada_mode = int(self._exec_mode == FULL)
            incoh_ip_range = ip_range_from_stream(
                incoherent_beam_config['destination'])
            incoh_mcast_dest = incoherent_beam_config['destination'].split(
                "//")[-1].split(":")[0]
            mksend_incoh_config = {
                'dada_key': self._dada_incoh_output_key,
                'dada_mode': dada_mode,
                'interface': self._capture_interface,
                'data_rate': incoh_data_rate,
                'mcast_port': incoh_ip_range.port,
                'mcast_destinations': incoh_mcast_dest,
                'sync_epoch': feng_config['sync-epoch'],
                'sample_clock': sample_clock,
                'heap_size': incoh_heap_size,
                'heap_id_start': worker_idx,
                'timestamp_step': incoh_timestamp_step,
                'beam_ids': 0,
                'multibeam': False,
                'subband_idx': chan0_idx,
                'heap_group': 1
            }
            mksend_incoh_header = make_mksend_header(
                mksend_incoh_config, outfile=MKSEND_INCOHERENT_CONFIG_FILENAME)
            log.info(
                "Determined MKSEND configuration for incoherent beam:\n{}".
                format(mksend_incoh_header))
            self._mksend_incoh_header_sensor.set_value(mksend_incoh_header)
            """
            Tasks:
                - compile kernels
                - create shared memory banks
            """
            # Here we create a future object for the psrdada_cpp compilation;
            # this is the longest-running setup task, so intermediate steps
            # such as dada buffer generation can proceed while it completes
            fbfuse_pipeline_params = {
                'total_nantennas': len(feng_capture_order_info['order']),
                'fbfuse_nchans': partition_nchans,
                'total_nchans': feng_config['nchans'],
                'coherent_tscrunch': coherent_beam_config['tscrunch'],
                'coherent_fscrunch': coherent_beam_config['fscrunch'],
                'coherent_nantennas': len(coherent_beam_config['antennas'].split(",")),
                'coherent_antenna_offset': feng_capture_order_info["coherent_span"][0],
                'coherent_nbeams': nbeams,
                'incoherent_tscrunch': incoherent_beam_config['tscrunch'],
                'incoherent_fscrunch': incoherent_beam_config['fscrunch']
            }
            psrdada_compilation_future = compile_psrdada_cpp(
                fbfuse_pipeline_params)

            log.info("Creating all DADA buffers")
            # Create capture data DADA buffer
            capture_block_size = ngroups_data * heap_group_size
            capture_block_count = int(AVAILABLE_CAPTURE_MEMORY /
                                      capture_block_size)
            log.debug("Creating dada buffer for input with key '{}'".format(
                "%s" % self._dada_input_key))
            input_make_db_future = self._make_db(self._dada_input_key,
                                                 capture_block_size,
                                                 capture_block_count)

            # Create coherent beam output DADA buffer
            coh_output_channels = (ngroups * nchans_per_group) / \
                coherent_beam_config['fscrunch']
            coh_output_samples = ngroups_data * \
                256 / coherent_beam_config['tscrunch']
            coherent_block_size = (nbeams * coh_output_channels *
                                   coh_output_samples)
            coherent_block_count = 32
            log.debug(
                ("Creating dada buffer for coherent beam output "
                 "with key '{}'").format("%x" % self._dada_coh_output_key))
            coh_output_make_db_future = self._make_db(
                self._dada_coh_output_key, coherent_block_size,
                coherent_block_count)

            # Create incoherent beam output DADA buffer
            incoh_output_channels = ((ngroups * nchans_per_group) /
                                     incoherent_beam_config['fscrunch'])
            incoh_output_samples = ((ngroups_data * 256) /
                                    incoherent_beam_config['tscrunch'])
            incoherent_block_size = incoh_output_channels * incoh_output_samples
            incoherent_block_count = 32
            log.debug(("Creating dada buffer for incoherent beam "
                       "output with key '{}'").format(
                           "%s" % self._dada_incoh_output_key))
            incoh_output_make_db_future = self._make_db(
                self._dada_incoh_output_key, incoherent_block_size,
                incoherent_block_count)

            # Need to pass the delay buffer controller the F-engine capture
            # order but only for the coherent beams
            cstart, cend = feng_capture_order_info['coherent_span']
            coherent_beam_feng_capture_order = feng_capture_order_info[
                'order'][cstart:cend]
            coherent_beam_antenna_capture_order = [
                feng_to_antenna_map[idx]
                for idx in coherent_beam_feng_capture_order
            ]

            # Start DelayBufferController instance
            # Here we are going to make the assumption that the server and processing all run in
            # one docker container that will be preallocated with the right CPU set, GPUs, memory
            # etc. This means that the configurations need to be unique by NUMA node... [Note: no
            # they don't, we can use the container IPC channel which isolates
            # the IPC namespaces.]
            #
            # Here we recreate the beam keys as they are handled by the BeamManager
            # instance in the product controller
            #
            beam_idxs = ["cfbf%05d" % (i) for i in range(nbeams)]
            self._delay_buf_ctrl = DelayBufferController(
                self._delay_client, beam_idxs,
                coherent_beam_antenna_capture_order, 1)
            yield self._delay_buf_ctrl.start()

            # By this point we require psrdada_cpp to have been compiled
            # as such we can yield on the future we created earlier
            yield psrdada_compilation_future

            # Now we can yield on dada buffer generation
            yield input_make_db_future
            yield coh_output_make_db_future
            yield incoh_output_make_db_future
            self._state_sensor.set_value(self.READY)
            log.info("Prepare request successful")
            req.reply("ok", )
Ejemplo n.º 26
0
class EddServerProductController(object):
    def __init__(self, product_id, address, port):
        """
        Interface for pipeline instances using katcp.

        Args:
            product_id:  A unique identifier for this product
            address:     The address of the KATCP interface for this product
            port:        The port number of the KATCP interface for this product
        """
        log.debug("Installing controller for {} at {}, {}".format(
            product_id, address, port))
        self.ip = address
        self.port = port
        self._client = KATCPClientResource(
            dict(name="server-client_{}".format(product_id),
                 address=(address, int(port)),
                 controlled=True))

        self._product_id = product_id
        self._client.start()

    @coroutine
    def _safe_request(self, request_name, *args, **kwargs):
        log.debug("Sending request '{}' to {} with arguments {}".format(
            request_name, self._product_id, args))
        try:
            yield self._client.until_synced()
            response = yield self._client.req[request_name](*args, **kwargs)
        except Exception as E:
            log.error("Error processing request: {} in {}".format(
                E, self._product_id))
            raise E
        if not response.reply.reply_ok():
            erm = "'{}' request failed in {} with error: {}".format(
                request_name, self._product_id, response.reply.arguments[1])
            log.error(erm)
            raise RuntimeError(erm)
        else:
            log.debug("'{}' request successful".format(request_name))
            raise Return(response)

    @coroutine
    def deconfigure(self):
        """
        @brief      Deconfigure the product

        @detail
        """
        yield self._safe_request('deconfigure', timeout=120.0)

    @coroutine
    def configure(self, config={}):
        """
        @brief      Configure the product with the given configuration dictionary
        """
        log.debug("Send cfg to {}".format(self._product_id))
        yield self._safe_request("configure",
                                 json.dumps(config),
                                 timeout=120.0)

    @coroutine
    def capture_start(self):
        """
        @brief      Start data capture on the product
        """
        yield self._safe_request("capture_start", timeout=120.0)

    @coroutine
    def capture_stop(self):
        """
        @brief      Stop data capture on the product
        """
        yield self._safe_request("capture_stop", timeout=120.0)

    @coroutine
    def measurement_prepare(self, config={}):
        """
        @brief      Prepare a measurement with the given configuration
        """
        yield self._safe_request("measurement_prepare",
                                 json.dumps(config),
                                 timeout=120.0)

    @coroutine
    def measurement_start(self):
        """
        @brief      Start a measurement on the product
        """
        yield self._safe_request("measurement_start", timeout=60.0)

    @coroutine
    def measurement_stop(self):
        """
        @brief      Stop the current measurement on the product
        """
        yield self._safe_request("measurement_stop", timeout=60.0)

    @coroutine
    def set(self, config):
        """
        @brief      Set configuration parameters on the product
        """
        log.debug("Send set to {}".format(self._product_id))
        yield self._safe_request("set", json.dumps(config), timeout=120.0)

    @coroutine
    def provision(self, config):
        """
        @brief      Provision the product with the given configuration
        """
        log.debug("Send provision to {}".format(self._product_id))
        yield self._safe_request("provision", config, timeout=300.0)

    @coroutine
    def deprovision(self):
        """
        @brief      Deprovision the product
        """
        log.debug("Send deprovision to {}".format(self._product_id))
        yield self._safe_request("deprovision", timeout=300.0)

    @coroutine
    def getConfig(self):
        """
        @brief      Retrieve the current configuration of the product
        """
        log.debug("Send get config to {}".format(self._product_id))
        R = yield self._safe_request("sensor_value",
                                     "current-config",
                                     timeout=3)
        raise Return(json.loads(R.informs[0].arguments[-1]))

    @coroutine
    def ping(self):
        log.debug("Ping product {} at {}:{}.".format(self._product_id, self.ip,
                                                     self.port))
        try:
            yield self._client.until_synced(timeout=2)
            log.debug("product reachable")
            cfg = yield self.getConfig()
            if cfg['id'] != self._product_id:
                log.warning('Product id changed!')
                raise Return(False)
            log.debug("ID match")
        except TimeoutError:
            log.debug("Timeout Reached. Product inactive")
            raise Return(False)
        except Exception as E:
            log.error("Error during ping: {}".format(E))
            raise Return(False)
        raise Return(True)
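
# Hypothetical usage sketch: ping a product and, if it responds, configure it
# and start capture. The address, port and product id are illustrative.
from tornado.ioloop import IOLoop
from tornado.gen import coroutine


@coroutine
def demo():
    controller = EddServerProductController("my_product", "127.0.0.1", 1235)
    alive = yield controller.ping()
    if alive:
        yield controller.configure({"id": "my_product"})
        yield controller.capture_start()


IOLoop.current().run_sync(demo)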
Ejemplo n.º 27
0
class DigitiserPacketiserClient(object):
    def __init__(self, host, port=7147):
        """
        Wraps katcp commands to control a digitiser/packetiser.

        Args:
            host:  The host IP or name for the desired packetiser KATCP interface
            port:  The port number for the desired packetiser KATCP interface
        """
        self._host = host
        self._port = port
        self._client = KATCPClientResource(
            dict(name="digpack-client",
                 address=(self._host, self._port),
                 controlled=True))
        self._client.start()
        self._capture_started = False

        self._sampling_modes = {
            4096000000: ("virtex7_dk769b", "4.096GHz", 3),
            4000000000: ("virtex7_dk769b", "4.0GHz", 5),
            3600000000: ("virtex7_dk769b", "3.6GHz", 7),
            3520000000: ("virtex7_dk769b", "3.52GHz", 7),
            3500000000: ("virtex7_dk769b", "3.5GHz", 7),
            3200000000: ("virtex7_dk769b", "3.2GHz", 9),
            2600000000: ("virtex7_dk769b", "2.6GHz", 3),
            2560000000: ("virtex7_dk769b", "2.56GHz", 2),
            1750000000: (
                "virtex7_dk769b_test146.mkt", "3.5GHz", 7
            )  # This is a special mode for the MeerKAT digital filter cores inside the EDD.
            # An effective 1750 MHz sampling rate / 875 MHz
            # bandwidth is achieved by digital filtering of
            # the 3.5 GHz sampled rate.
        }  # This is quite hacky and the design of this client has to be improved, possibly by having a client per firmware.
        self.__firmware = None

    def stop(self):
        self._client.stop()

    @coroutine
    def _safe_request(self, request_name, *args):
        """
        Send a request to the client and log an ok / error message for the response.

        Args:
            request_name: Name of the request
            *args:        Arguments passed to the request.
        """
        _log.info("Sending packetiser request '{}' with arguments {}".format(
            request_name, args))
        yield self._client.until_synced()
        response = yield self._client.req[request_name](*args)
        if not response.reply.reply_ok():
            _log.error("'{}' request failed with error: {}".format(
                request_name, response.reply.arguments[1]))
            raise DigitiserPacketiserError(response.reply.arguments[1])
        else:
            _log.debug("'{}' request successful".format(request_name))
            raise Return(response)

    @coroutine
    def _check_interfaces(self, interfaces=['iface00', 'iface01']):
        """
        Check whether the digitiser's 40-GbE interfaces are in an error state.
        """
        _log.debug("Checking status of 40 GbE interfaces")
        yield self._client.until_synced()

        @coroutine
        def _check_interface(name):
            _log.debug("Checking status of '{}'".format(name))
            sensor = self._client.sensor[
                'rxs_packetizer_40g_{}_am_lock_status'.format(name)]
            status = yield sensor.get_value()
            if status != 0x0f:
                _log.warning("Interface '{}' in error state".format(name))
                raise PacketiserInterfaceError(
                    "40-GbE interface '{}' did not boot".format(name))
            else:
                _log.debug("Interface '{}' is healthy".format(name))

        for iface in interfaces:
            yield _check_interface(iface)

    @coroutine
    def set_predecimation(self, factor):
        """
        Set a predecimation factor for the packetiser; e.g. for factor=2 only every second sample is used.
        """
        allowedFactors = [1, 2, 4, 8,
                          16]  # Eddy Nussbaum, private communication
        if factor not in allowedFactors:
            raise RuntimeError(
                "predicimation factor {} not in allowed factors {}".format(
                    factor, allowedFactors))
        yield self._safe_request("rxs_packetizer_edd_predecimation", factor)

    @coroutine
    def set_noise_diode_frequency(self, frequency):
        """
        Set the noise diode firing frequency [Hz]. A frequency of 0 disables the noise diode.
        """
        if frequency == 0:
            yield self.set_noise_diode_firing_pattern(0.0, 0.0, "now")
        else:
            yield self.set_noise_diode_firing_pattern(0.5, 1. / frequency,
                                                      "now")

    @coroutine
    def set_noise_diode_firing_pattern(self, percentage, period, start="now"):
        """
        Set noise diode frequency to given value.

        Args:
            percentage: Percentage of period which the noise diode is turned on.
            period:     Period of fireing [s].
        """
        _log.debug("Set noise diode firing pattern")
        yield self._safe_request("noise_source", start, percentage, period)

    @coroutine
    def set_sampling_rate(self, rate, retries=3):
        """
        Sets the sampling rate.

        Args:
            rate:    The sampling rate in samples per second (e.g. 2.6 GHz should be passed as 2600000000.0)


        To allow time for reinitialisation of the packetiser firmware during this call we enforce a 10
        second sleep before the function returns.
        """

        try:
            args = self._sampling_modes[int(rate)]
        except KeyError as error:
            pos_freqs = "\n".join(
                ["  - {} Hz ".format(f) for f in self._sampling_modes.keys()])
            error_msg = "Frequency {} Hz not in possible frequencies:\n{}".format(
                rate, pos_freqs)
            _log.error(error_msg)
            raise DigitiserPacketiserError(error_msg)

        attempts = 0
        while True:
            _log.debug("Reinit packetizer with firmware: {}".format(args[0]))

            response = yield self._safe_request("rxs_packetizer_system_reinit",
                                                *args)
            self.__firmware = args[0]
            yield sleep(20)
            try:
                _log.warning(
                    "Hard-coded firmware names in interface checks. This is a short-term hack!"
                )
                if args[0] == "virtex7_dk769b":
                    yield self._check_interfaces()
                elif args[0] == "virtex7_dk769b_test146.mkt":
                    yield self._check_interfaces(["iface00"])
                else:
                    raise RuntimeError("Unknown core")

            except PacketiserInterfaceError as error:
                if attempts >= retries:
                    raise error
                else:
                    _log.warning("Retrying system initalisation")
                    attempts += 1
                    continue
            else:
                break

    @coroutine
    def set_digitial_filter(self, filter_number):
        """
        Sets the digital filter number.

        """
        yield self._safe_request("rxs_packetizer_40g_filter_selection_set",
                                 filter_number)

    @coroutine
    def set_bit_width(self, nbits):
        """
        Sets the number of bits per sample out of the packetiser

        Args:
            nbits:  The desired number of bits per sample (e.g. 8 or 12)
        """
        valid_modes = {8: "edd08", 10: "edd10", 12: "edd12"}
        _log.warning("Firmware switch for bit set mode!")
        if self.__firmware == "virtex7_dk769b_test146.mkt":
            _log.debug("Firmware does not support setting bit rate!")
            return
        try:
            mode = valid_modes[int(nbits)]
        except KeyError as error:
            msg = "Invalid bit depth, valid bit depths are: {}".format(
                valid_modes.keys())
            _log.error(msg)
            raise DigitiserPacketiserError(msg)
        yield self._safe_request("rxs_packetizer_edd_switchmode", mode)

    @coroutine
    def flip_spectrum(self, flip):
        """
        Flip spectrum flip = True/False to adjust for even/odd nyquist zone
        """
        if flip:
            yield self._safe_request("rxs_packetizer_edd_flipsignalspectrum",
                                     "on")
        else:
            yield self._safe_request("rxs_packetizer_edd_flipsignalspectrum",
                                     "off")

    @coroutine
    def set_destinations(self, v_dest, h_dest):
        """
        Sets the multicast destinations for data out of the packetiser

        Args:
            v_dest:  The vertical polarisation channel destinations
            h_dest:  The horizontal polarisation channel destinations

        The destinations should be provided as composite stream definition
        strings, e.g. 225.0.0.152+3:7148 (this defines four multicast groups:
        225.0.0.152, 225.0.0.153, 225.0.0.154 and 225.0.0.155, all using
        port 7148). Currently the packetiser only accepts contiguous IP
        ranges for each set of destinations.
        """
        yield self._safe_request("capture_destination", "v", v_dest)
        yield self._safe_request("capture_destination", "h", h_dest)

    @coroutine
    def set_mac_address(self, intf, mac):
        """
        Sets the mac adresses of the source NICs of the packetiser

        Args:
            intf: The number of the NIC
            mac:  The mac of the NIC
        """
        yield self._safe_request("rxs_packetizer_40g_source_mac_set", intf,
                                 mac)

    @coroutine
    def set_predecimation_factor(self, factor):
        """
        Sets the predecimation_factorfor data out of the packetiser

        Args:
            factor: (e.g. 1,2,4,8)

        """
        yield self._safe_request("rxs_packetizer_edd_predecimation", factor)

    @coroutine
    def enable_snapshot(self, time=5):
        """
        Enable spectral snapshots on the packetizer (time presumably in seconds).
        """
        yield self._safe_request("rxs_packetizer_snapshot_enable_spec", time)
        yield self._safe_request("rxs_packetizer_snapshot_enable_spec")

    @coroutine
    def set_flipsignalspectrum(self, value):
        """
        Sets the rxs-packetizer-edd-flipsignalspectrum data out of the packetiser

        Args:
            value: (e.g. 0, 1)

        """
        yield self._safe_request("rxs_packetizer_edd_flipsignalspectrum",
                                 value)

    @coroutine
    def set_interface_address(self, intf, ip):
        """
        Set the interface address for a packetiser qsfp interface

        Args:

            intf:   The interface specified as a string integer, e.g. '0' or '1'
            ip:     The IP address to assign to the interface
        """
        yield self._safe_request("rxs_packetizer_40g_source_ip_set", intf, ip)

    @coroutine
    def capture_start(self):
        """
        Start data transmission for both polarisation channels

        This method uses the packetiser's 'capture-start' method, an
        aggregate command that ensures all necessary flags on the packetiser
        are set for data transmission. This includes the 1PPS flag required by
        the ROACH2 boards.
        """
        if not self._capture_started:
            """
            Only start capture once and not twice if received configure
            """
            self._capture_started = True
            yield self._safe_request("capture_start", "vh")

    @coroutine
    def configure(self, config):
        """
        Applying configuration recieved in dictionary
        """
        self._capture_started = False
        yield self._safe_request("capture_stop", "vh")
        yield self.set_sampling_rate(config["sampling_rate"])
        yield self.set_predecimation(config["predecimation_factor"])
        yield self.flip_spectrum(config["flip_spectrum"])
        yield self.set_bit_width(config["bit_width"])
        yield self.set_destinations(config["v_destinations"],
                                    config["h_destinations"])
        if "noise_diode_frequency" in config:
            yield self.set_noise_diode_frequency(
                config["noise_diode_frequency"])

        for interface, ip_address in config["interface_addresses"].items():
            yield self.set_interface_address(interface, ip_address)
        if "sync_time" in config:
            yield self.synchronize(config["sync_time"])
        else:
            yield self.synchronize()
        yield self.capture_start()
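
    # A minimal sketch of the configuration dictionary consumed above; all
    # values are hypothetical and the optional keys "noise_diode_frequency"
    # and "sync_time" may be omitted:
    #
    #     config = {
    #         "sampling_rate": 2600000000,
    #         "predecimation_factor": 1,
    #         "flip_spectrum": False,
    #         "bit_width": 8,
    #         "v_destinations": "225.0.0.152+3:7148",
    #         "h_destinations": "225.0.0.156+3:7148",
    #         "interface_addresses": {"0": "10.10.1.10", "1": "10.10.1.11"},
    #     }
    #     yield client.configure(config)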

    @coroutine
    def deconfigure(self):
        """
        Deconfigure. Not doing anythin
        """
        raise Return()

    @coroutine
    def measurement_start(self):
        """
        """
        raise Return()

    @coroutine
    def measurement_stop(self):
        """
        """
        raise Return()

    @coroutine
    def measurement_prepare(self, config=None):
        """
        Prepare a measurement, optionally updating the noise diode settings
        from the given configuration.
        """
        config = config or {}
        if "noise_diode_frequency" in config:
            yield self.set_noise_diode_frequency(
                config["noise_diode_frequency"])
        elif "noise_diode_pattern" in config:
            c = config["noise_diode_pattern"]
            yield self.set_noise_diode_firing_pattern(c["percentage"],
                                                      c["period"])

        raise Return()

    @coroutine
    def capture_stop(self):
        """
        Stop data transmission for both polarisation channels
        """
        _log.warning("Not stopping data transmission")
        raise Return()
        #yield self._safe_request("capture_stop", "vh")

    @coroutine
    def get_sync_time(self):
        """
        Get the current packetiser synchronisation epoch

        Returns:
            The synchronisation epoch as a unix time float
        """
        response = yield self._safe_request("rxs_packetizer_40g_get_zero_time")
        sync_epoch = float(response.informs[0].arguments[0])
        raise Return(sync_epoch)

    @coroutine
    def get_snapshot(self):
        """
        Returns dictionary with snapshot data from the packetizer.
        """
        response = yield self._safe_request("rxs_packetizer_snapshot_get_spec")
        res = {}
        for message in response.informs:
            key = message.arguments[0]
            if 'header' in key:
                res[key] = dict(band_width=float(message.arguments[1]) * 1e3,
                                integration_time=float(message.arguments[2]),
                                num_channels=int(message.arguments[3]),
                                band_width_adc=float(message.arguments[4]),
                                spec_counter=int(message.arguments[5]),
                                timestamp=message.arguments[6])
            elif 'adc' in key:
                # np.frombuffer replaces the deprecated np.fromstring for raw binary data
                res[key] = np.frombuffer(message.arguments[1],
                                         dtype=np.float32)
            elif 'level' in key:
                res[key] = np.frombuffer(message.arguments[1], dtype=np.int32)

        raise Return(res)
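
    # The returned dictionary is keyed by the inform message names: 'header'
    # entries expose e.g. res[key]["num_channels"] and
    # res[key]["integration_time"], while 'adc' and 'level' entries hold
    # numpy arrays.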

    @coroutine
    def synchronize(self, unix_time=None):
        """
        Set the synchronisation epoch for the packetiser

        Args:
            unix_time:  The unix time to synchronise at. If no value is provided a
                        reasonable value will be selected.

        When explicitly setting the synchronisation time it should be a second
        or two into the future to allow enough time for communication with the
        packetiser. If the time is in the past by the time the request reaches
        the packetiser, the next 1PPS tick will be selected. Users *must* call
        get_sync_time to get the actual time that was set. This call will
        block until the sync epoch has passed (i.e. if a sync epoch is chosen
        that is 10 seconds in the future, the call will block for 10 seconds).

        """
        if not unix_time:
            unix_time = round(time.time() + 2)
        yield self._safe_request("synchronise", 0, unix_time)
        sync_epoch = yield self.get_sync_time()
        if sync_epoch != unix_time:
            _log.warning(
                "Requested sync time {} not equal to actual sync time {}".
                format(unix_time, sync_epoch))

    @coroutine
    def populate_data_store(self, host, port):
        """
        Populate the data store

        Args:
            host:     IP of the data store to use
            port:     Port of the data store
        """
        _log.debug("Populate data store @ {}:{}".format(host, port))
        dataStore = EDDDataStore(host, port)
        _log.debug("Adding output formats to known data formats")

        descr = {
            "description": "Digitizer/Packetizer spead. One heap per packet.",
            "ip": None,
            "port": None,
            "bit_depth": None,  # Dynamic Parameter
            "sample_rate": None,
            "sync_time": None,
            "samples_per_heap": 4096
        }

        dataStore.addDataFormatDefinition("MPIFR_EDD_Packetizer:1", descr)
        raise Return()
Example no. 28
0
class DigitiserPacketiserClient(object):
    def __init__(self, host, port=7147):
        """
        @brief      Class for digitiser packetiser client.

        @param      host   The host IP or name for the desired packetiser KATCP interface
        @param      port   The port number for the desired packetiser KATCP interface
        """
        self._host = host
        self._port = port
        self._client = KATCPClientResource(
            dict(name="digpack-client",
                 address=(self._host, self._port),
                 controlled=True))
        self._client.start()

    def stop(self):
        self._client.stop()

    @coroutine
    def _safe_request(self, request_name, *args):
        log.info("Sending packetiser request '{}' with arguments {}".format(
            request_name, args))
        yield self._client.until_synced()
        response = yield self._client.req[request_name](*args)
        if not response.reply.reply_ok():
            log.error("'{}' request failed with error: {}".format(
                request_name, response.reply.arguments[1]))
            raise DigitiserPacketiserError(response.reply.arguments[1])
        else:
            log.debug("'{}' request successful".format(request_name))
            raise Return(response)

    @coroutine
    def _check_interfaces(self):
        log.debug("Checking status of 40 GbE interfaces")
        yield self._client.until_synced()

        @coroutine
        def _check_interface(name):
            log.debug("Checking status of '{}'".format(name))
            sensor = self._client.sensor[
                'rxs_packetizer_40g_{}_am_lock_status'.format(name)]
            status = yield sensor.get_value()
            if status != 0x0f:
                log.warning("Interface '{}' in error state".format(name))
                raise PacketiserInterfaceError(
                    "40-GbE interface '{}' did not boot".format(name))
            else:
                log.debug("Interface '{}' is healthy".format(name))

        yield _check_interface('iface00')
        yield _check_interface('iface01')

    @coroutine
    def set_sampling_rate(self, rate, retries=3):
        """
        @brief      Sets the sampling rate.

        @param      rate    The sampling rate in samples per second (e.g. 2.6 GHz should be passed as 2600000000.0)

        @detail     To allow time for reinitialisation of the packetiser firmware during this call we enforce a 10
                    second sleep before the function returns.
        """
        valid_modes = {
            4000000000: ("virtex7_dk769b", "4.0GHz", 5),
            2600000000: ("virtex7_dk769b", "2.6GHz", 3)
        }
        try:
            args = valid_modes[rate]
        except KeyError as error:
            msg = "Invalid sampling rate, valid sampling rates are: {}".format(
                valid_modes.keys())
            log.error(msg)
            raise DigitiserPacketiserError(msg)

        attempts = 0
        while True:
            response = yield self._safe_request("rxs_packetizer_system_reinit",
                                                *args)
            yield sleep(10)
            try:
                yield self._check_interfaces()
            except PacketiserInterfaceError as error:
                if attempts >= retries:
                    raise error
                else:
                    log.warning("Retrying system initalisation")
                    attempts += 1
                    continue
            else:
                break

    @coroutine
    def set_bit_width(self, nbits):
        """
        @brief      Sets the number of bits per sample out of the packetiser

        @param      nbits  The desired number of bits per sample (e.g. 8 or 12)
        """
        valid_modes = {8: "edd08", 12: "edd12"}
        try:
            mode = valid_modes[nbits]
        except KeyError as error:
            msg = "Invalid bit depth, valid bit depths are: {}".format(
                valid_modes.keys())
            log.error(msg)
            raise DigitiserPacketiserError(msg)
        yield self._safe_request("rxs_packetizer_edd_switchmode", mode)

    @coroutine
    def set_destinations(self, v_dest, h_dest):
        """
        @brief      Sets the multicast destinations for data out of the packetiser

        @param      v_dest  The vertical polarisation channel destinations
        @param      h_dest  The horizontal polarisation channel destinations

        @detail     The destinations should be provided as composite stream definition
                    strings, e.g. 225.0.0.152+3:7148 (this defines four multicast groups:
                    225.0.0.152, 225.0.0.153, 225.0.0.154 and 225.0.0.155, all using
                    port 7148). Currently the packetiser only accepts contiguous IP
                    ranges for each set of destinations.
        """
        yield self._safe_request("capture_destination", "v", v_dest)
        yield self._safe_request("capture_destination", "h", h_dest)

    @coroutine
    def set_interface_address(self, intf, ip):
        """
        @brief      Set the interface address for a packetiser qsfp interface

        @param      intf   The interface specified as a string integer, e.g. '0' or '1'
        @param      ip     The IP address to assign to the interface
        """
        yield self._safe_request("rxs_packetizer_40g_source_ip_set", intf, ip)

    @coroutine
    def capture_start(self):
        """
        @brief      Start data transmission for both polarisation channels

        @detail     This method uses the packetiser's 'capture-start' method
                    which is an aggregate command that ensures all necessary
                    flags on the packetiser are set for data transmission.
                    This includes the 1PPS flag required by the ROACH2 boards.
        """
        yield self._safe_request("capture_start", "vh")

    @coroutine
    def capture_stop(self):
        """
        @brief      Stop data transmission for both polarisation channels
        """
        yield self._safe_request("capture_stop", "vh")

    @coroutine
    def get_sync_time(self):
        """
        @brief      Get the current packetiser synchronisation epoch

        @return     The synchronisation epoch as a unix time float
        """
        response = yield self._safe_request("rxs_packetizer_40g_get_zero_time")
        sync_epoch = float(response.informs[0].arguments[0])
        raise Return(sync_epoch)

    @coroutine
    def synchronize(self, unix_time=None):
        """
        @brief      Set the synchronisation epoch for the packetiser

        @param      unix_time  The unix time to synchronise at. If no value is provided a
                               reasonable value will be selected.

        @detail     When explicitly setting the synchronisation time it should be a
                    second or two into the future to allow enough time for communication
                    with the packetiser. If the time is in the past by the time the request
                    reaches the packetiser, the next 1PPS tick will be selected.
                    Users *must* call get_sync_time to get the actual time that was set.
                    This call will block until the sync epoch has passed (i.e. if a sync epoch
                    is chosen that is 10 seconds in the future, the call will block for 10 seconds).

        @note       The packetiser rounds to the nearest 1 PPS tick, so it is
                    recommended to set the synchronisation time as a whole
                    number of seconds.
        """
        if not unix_time:
            unix_time = round(time.time() + 2)
        yield self._safe_request("synchronise", 0, unix_time)
        sync_epoch = yield self.get_sync_time()
        if sync_epoch != unix_time:
            log.warning(
                "Requested sync time {} not equal to actual sync time {}".
                format(unix_time, sync_epoch))
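
A minimal usage sketch for the DigitiserPacketiserClient above, assuming a
reachable packetiser KATCP endpoint; the host name and multicast destinations
below are hypothetical placeholders:

from tornado.gen import coroutine
from tornado.ioloop import IOLoop

@coroutine
def main():
    # Hypothetical packetiser endpoint; replace with a real KATCP address.
    client = DigitiserPacketiserClient("packetiser.example.org", port=7147)
    try:
        yield client.set_sampling_rate(2600000000)  # one of the valid modes
        yield client.set_bit_width(8)               # 8 or 12 bits per sample
        yield client.set_destinations("225.0.0.152+3:7148",
                                      "225.0.0.156+3:7148")
        yield client.synchronize()                  # defaults to now + ~2 s
        yield client.capture_start()
        sync_epoch = yield client.get_sync_time()
        print("Packetiser sync epoch: {}".format(sync_epoch))
    finally:
        yield client.capture_stop()
        client.stop()

IOLoop.current().run_sync(main)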