Example #1
class CriticalPFBPipeline(EDDPipeline):
    """@brief critical PFB pipeline class."""
    VERSION_INFO = ("mpikat-edd-api", 0, 1)
    BUILD_INFO = ("mpikat-edd-implementation", 0, 1, "rc1")

    def __init__(self, ip, port, scpi_ip, scpi_port):
        """@brief initialize the pipeline."""
        self._dada_buffers = []
        self.mkrec_cmd = []
        EDDPipeline.__init__(self, ip, port, scpi_ip, scpi_port)

    def setup_sensors(self):
        """
        @brief Setup monitoring sensors
        """
        EDDPipeline.setup_sensors(self)

        self._edd_config_sensor = Sensor.string(
            "current-config",
            description="The current configuration for the EDD backend",
            default=json.dumps(DEFAULT_CONFIG, indent=4),
            initial_status=Sensor.UNKNOWN)
        self.add_sensor(self._edd_config_sensor)

        self._output_rate_status = Sensor.float(
            "output-rate",
            description="Output data rate [Gbyte/s]",
            initial_status=Sensor.UNKNOWN)
        self.add_sensor(self._output_rate_status)

        self._polarization_sensors = {}
        for p in POLARIZATIONS:
            self._polarization_sensors[p] = {}
            self._polarization_sensors[p]["mkrecv_sensors"] = MkrecvSensors(p)
            for s in self._polarization_sensors[p][
                    "mkrecv_sensors"].sensors.values():
                self.add_sensor(s)
            self._polarization_sensors[p][
                "input-buffer-fill-level"] = Sensor.float(
                    "input-buffer-fill-level-{}".format(p),
                    description="Fill level of the input buffer "
                    "for polarization {}".format(p),
                    params=[0, 1])
            self.add_sensor(
                self._polarization_sensors[p]["input-buffer-fill-level"])
            self._polarization_sensors[p][
                "input-buffer-total-write"] = Sensor.float(
                    "input-buffer-total-write-{}".format(p),
                    description="Total write into input buffer "
                    "for polarization {}".format(p),
                    params=[0, 1])

            self.add_sensor(
                self._polarization_sensors[p]["input-buffer-total-write"])
            self._polarization_sensors[p][
                "output-buffer-fill-level"] = Sensor.float(
                    "output-buffer-fill-level-{}".format(p),
                    description="Fill level of the output buffer "
                    "for polarization {}".format(p))
            self._polarization_sensors[p][
                "output-buffer-total-read"] = Sensor.float(
                    "output-buffer-total-read-{}".format(p),
                    description="Total read from output buffer "
                    "for polarization {}".format(p))
            self.add_sensor(
                self._polarization_sensors[p]["output-buffer-total-read"])
            self.add_sensor(
                self._polarization_sensors[p]["output-buffer-fill-level"])

    @coroutine
    def _create_ring_buffer(self, bufferSize, blocks, key, numa_node):
        """
         Create a ring buffer of given size with given key on specified numa node.
         Adds and register an appropriate sensor to thw list
         """
        # always clear buffer first. Allow fail here
        yield command_watcher("dada_db -d -k {key}".format(key=key),
                              allow_fail=True)

        cmd = "numactl --cpubind={numa_node} --membind={numa_node} dada_db -k {key} -n {blocks} -b {bufferSize} -p -l".format(
            key=key, blocks=blocks, bufferSize=bufferSize, numa_node=numa_node)
        log.debug("Running command: {0}".format(cmd))
        yield command_watcher(cmd)

        M = DbMonitor(key, self._buffer_status_handle)
        M.start()
        self._dada_buffers.append({'key': key, 'monitor': M})

    def _buffer_status_handle(self, status):
        """
        Process a change in the buffer status
        """
        for p in POLARIZATIONS:
            if status['key'] == self._config[p]['dada_key']:
                self._polarization_sensors[p][
                    "input-buffer-total-write"].set_value(status['written'])
                self._polarization_sensors[p][
                    "input-buffer-fill-level"].set_value(
                        status['fraction-full'])
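            # output buffers use the reversed input dada key as their key
            # (see configure(), where ofname = bufferName[::-1])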
            elif status['key'] == self._config[p]['dada_key'][::-1]:
                self._polarization_sensors[p][
                    "output-buffer-fill-level"].set_value(
                        status['fraction-full'])
                self._polarization_sensors[p][
                    "output-buffer-total-read"].set_value(status['read'])

    @coroutine
    def configure(self, config_json):
        """@brief destroy any ring buffer and create new ring buffer."""
        """
        @brief   Configure the EDD CCritical PFB

        @param   config_json    A JSON dictionary object containing configuration information

        @detail  The configuration dictionary is highly flexible. An example is below:
        """
        log.info("Configuring EDD backend for processing")
        log.debug("Configuration string: '{}'".format(config_json))
        if self.state != "idle":
            raise FailReply(
                'Cannot configure pipeline. Pipeline state {}.'.format(
                    self.state))
        # alternatively we should automatically deconfigure
        #yield self.deconfigure()

        self.state = "configuring"

        # Merge retrieved config into default via recursive dict merge
        def __updateConfig(oldo, new):
            old = oldo.copy()
            for k in new:
                if isinstance(old[k], dict):
                    old[k] = __updateConfig(old[k], new[k])
                else:
                    old[k] = new[k]
            return old
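        # Merge semantics (hypothetical values):
        #   __updateConfig({'a': {'b': 1, 'c': 2}}, {'a': {'b': 9}})
        #   returns {'a': {'b': 9, 'c': 2}}; a key missing from the default
        #   config raises KeyError, reported below as an unknown option.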

        if isinstance(config_json, str):
            cfg = json.loads(config_json)
        elif isinstance(config_json, dict):
            cfg = config_json
        else:
            self.state = "idle"  # no states changed
            raise FailReply(
                "Cannot handle config type {}. Config has to bei either json formatted string or dict!"
                .format(type(config_json)))
        try:
            self._config = __updateConfig(DEFAULT_CONFIG, cfg)
        except KeyError as error:
            self.state = "idle"  # no states changed
            raise FailReply("Unknown configuration option: {}".format(
                str(error)))

        cfs = json.dumps(self._config, indent=4)
        log.info("Received configuration:\n" + cfs)
        self._edd_config_sensor.set_value(cfs)

        # calculate input buffer parameters
        self.input_heapSize = self._config["samples_per_heap"] * self._config[
            'input_bit_depth'] / 8
        nHeaps = self._config["samples_per_block"] / self._config[
            "samples_per_heap"]
        input_bufferSize = nHeaps * (self.input_heapSize)
        log.info('Input dada parameters created from configuration:\n\
                heap size:        {} byte\n\
                heaps per block:  {}\n\
                buffer size:      {} byte'.format(self.input_heapSize, nHeaps,
                                                  input_bufferSize))

        # calculate output buffer parameters
        nSlices = max(
            self._config["samples_per_block"] // self._config['fft_length'], 1)
        nChannels = self._config['fft_length'] // 2
        # on / off spectrum + one side channel item per spectrum
        output_bufferSize = nSlices * 2 * nChannels * self._config[
            'output_bit_depth'] // 8
        output_heapSize = output_bufferSize

        # in the spead documentation rates are BYTEs per second, not bits!
        rate = output_bufferSize * float(
            self._config['sample_clock']) / self._config["samples_per_block"]
        # set rate to (100+X)% of the expected rate
        rate *= self._config["output_rate_factor"]
        self._output_rate_status.set_value(rate / 1E9)

        log.info('Output parameters calculated from configuration:\n\
                spectra per block:  {} \n\
                nChannels:          {} \n\
                buffer size:        {} byte \n\
                heap size:          {} byte\n\
                rate ({:.0f}%):        {} Gbyte/s'.format(
            nSlices, nChannels, output_bufferSize, output_heapSize,
            self._config["output_rate_factor"] * 100, rate / 1E9))
        self._subprocessMonitor = SubprocessMonitor()

        for i, k in enumerate(self._config['enabled_polarizations']):
            numa_node = self._config[k]['numa_node']

            # configure dada buffer
            bufferName = self._config[k]['dada_key']
            yield self._create_ring_buffer(input_bufferSize, 64, bufferName,
                                           numa_node)

            ofname = bufferName[::-1]
            # we write nSlice blocks on each go
            yield self._create_ring_buffer(output_bufferSize, 64, ofname,
                                           numa_node)

            # Configure + launch
            # here should be a smarter system to parse the options from the
            # controller to the program without redundant typing of options
            physcpu = numa.getInfo()[numa_node]['cores'][0]
            cmd = "taskset {physcpu} pfb --input_key={dada_key} --inputbitdepth={input_bit_depth} --fft_length={fft_length} --ntaps={ntaps}   -o {ofname} --log_level={log_level} --outputbitdepth={output_bit_depth} --output_type=dada".format(
                dada_key=bufferName,
                ofname=ofname,
                heapSize=self.input_heapSize,
                numa_node=numa_node,
                physcpu=physcpu,
                **self._config)
            log.debug("Command to run: {}".format(cmd))

            cudaDevice = numa.getInfo()[self._config[k]
                                        ["numa_node"]]["gpus"][0]
            cli = ManagedProcess(cmd, env={"CUDA_VISIBLE_DEVICES": cudaDevice})
            self._subprocessMonitor.add(cli, self._subprocess_error)
            self._subprocesses.append(cli)

            cfg = self._config.copy()
            cfg.update(self._config[k])

            if self._config["output_type"] == 'dada':
                mksend_header_file = tempfile.NamedTemporaryFile(mode="w", delete=False)
                mksend_header_file.write(mksend_header)
                mksend_header_file.close()

                # number of samples per input block, passed to mksend as the
                # --item1-step between consecutive heaps
                timestep = input_bufferSize * 8 // cfg['input_bit_depth']
                physcpu = ",".join(numa.getInfo()[numa_node]['cores'][1:4])
                cmd = "taskset {physcpu} mksend --header {mksend_header} --nthreads 3 --dada-key {ofname} --ibv-if {ibv_if} --port {port_tx} --sync-epoch {sync_time} --sample-clock {sample_clock} --item1-step {timestep} --item2-list {polarization} --item3-list {fft_length} --item4-list {ntaps} --item6-list {sample_clock} --item5-list {sync_time} --rate {rate} --heap-size {heap_size} {mcast_dest}".format(
                    mksend_header=mksend_header_file.name,
                    timestep=timestep,
                    ofname=ofname,
                    polarization=i,
                    nChannels=nChannels,
                    physcpu=physcpu,
                    rate=rate,
                    heap_size=output_heapSize,
                    **cfg)

            elif self._config["output_type"] == 'disk':
                cmd = "dada_dbnull -z -k {}".format(ofname)
                if not os.path.isdir("./{ofname}".format(ofname=ofname)):
                    os.mkdir("./{ofname}".format(ofname=ofname))
                cmd = "dada_dbdisk -k {ofname} -D ./{ofname} -W".format(
                    ofname=ofname, **cfg)

            else:
                log.warning("Selected null output. Not sending data!")
                cmd = "dada_dbnull -z -k {}".format(ofname)

            log.debug("Command to run: {}".format(cmd))
            mks = ManagedProcess(cmd)
            self._subprocessMonitor.add(mks, self._subprocess_error)
            self._subprocesses.append(mks)

        self._subprocessMonitor.start()
        self.state = "ready"

    @coroutine
    def capture_start(self, config_json=""):
        """@brief start the dspsr instance then turn on dada_junkdb instance."""
        log.info("Starting EDD backend")
        if self.state != "ready":
            raise FailReply(
                "pipleine state is not in state = ready, but in state = {} - cannot start the pipeline"
                .format(self.state))
            #return

        self.state = "starting"
        try:
            mkrecvheader_file = tempfile.NamedTemporaryFile(mode="w", delete=False)
            log.debug("Creating mkrecv header file: {}".format(
                mkrecvheader_file.name))
            mkrecvheader_file.write(mkrecv_header)
            # DADA may need this
            mkrecvheader_file.write("NBIT {}\n".format(
                self._config["input_bit_depth"]))
            mkrecvheader_file.write("HEAP_SIZE {}\n".format(
                self.input_heapSize))

            mkrecvheader_file.write("\n#OTHER PARAMETERS\n")
            mkrecvheader_file.write("samples_per_block {}\n".format(
                self._config["samples_per_block"]))

            mkrecvheader_file.write(
                "\n#PARAMETERS ADDED AUTOMATICALLY BY MKRECV\n")
            mkrecvheader_file.close()

            for i, k in enumerate(self._config['enabled_polarizations']):
                cfg = self._config.copy()
                cfg.update(self._config[k])
                if not self._config['dummy_input']:
                    numa_node = self._config[k]['numa_node']
                    physcpu = ",".join(numa.getInfo()[numa_node]['cores'][4:9])
                    cmd = "taskset {physcpu} mkrecv_nt --quiet --header {mkrecv_header} --idx1-step {samples_per_heap} --dada-key {dada_key} \
                    --sync-epoch {sync_time} --sample-clock {sample_clock} \
                    --ibv-if {ibv_if} --port {port_rx} {mcast_sources}".format(
                        mkrecv_header=mkrecvheader_file.name,
                        physcpu=physcpu,
                        **cfg)
                    mk = ManagedProcess(
                        cmd,
                        stdout_handler=self._polarization_sensors[k]
                        ["mkrecv_sensors"].stdout_handler)
                else:
                    log.warning(
                        "Creating Dummy input instead of listening to network!"
                    )
                    cmd = "dummy_data_generator -o {dada_key} -b {input_bit_depth} -d 1000 -s 0".format(
                        **cfg)

                    mk = ManagedProcess(cmd)

                self.mkrec_cmd.append(mk)
                self._subprocessMonitor.add(mk, self._subprocess_error)

        except Exception as e:
            log.error("Error starting pipeline: {}".format(e))
            self.state = "error"
        else:
            self.state = "running"
            self.__watchdogs = []
            for i, k in enumerate(self._config['enabled_polarizations']):
                wd = SensorWatchdog(
                    self._polarization_sensors[k]["input-buffer-total-write"],
                    20, self.watchdog_error)
                wd.start()
                self.__watchdogs.append(wd)

    @coroutine
    def capture_stop(self):
        """@brief stop the dada_junkdb and dspsr instances."""
        log.info("Stoping EDD backend")
        if self.state != 'running':
            log.warning(
                "pipeline state is not 'running' but '{}'".format(
                    self.state))
        log.debug("Stopping")
        for wd in self.__watchdogs:
            wd.stop_event.set()
        if self._subprocessMonitor is not None:
            self._subprocessMonitor.stop()

        # stop mkrec process
        log.debug("Stopping mkrecv processes ...")
        for proc in self.mkrec_cmd:
            proc.terminate()
        # This will terminate also the edd gpu process automatically

        yield self.deconfigure()

    @coroutine
    def deconfigure(self):
        """@brief deconfigure the dspsr pipeline."""
        log.info("Deconfiguring EDD backend")
        if self.state == 'running':
            yield self.capture_stop()

        self.state = "deconfiguring"
        if self._subprocessMonitor is not None:
            self._subprocessMonitor.stop()
        for proc in self._subprocesses:
            proc.terminate()

        self.mkrec_cmd = []

        log.debug("Destroying dada buffers")
        for k in self._dada_buffers:
            k['monitor'].stop()
            cmd = "dada_db -d -k {0}".format(k['key'])
            log.debug("Running command: {0}".format(cmd))
            yield command_watcher(cmd)

        self._dada_buffers = []
        self.state = "idle"
Example #2
class VLBIPipeline(EDDPipeline):
    """@brief VLBI pipeline class."""
    VERSION_INFO = ("mpikat-edd-api", 0, 1)
    BUILD_INFO = ("mpikat-edd-implementation", 0, 1, "rc1")

    def __init__(self, ip, port):
        """@brief initialize the pipeline."""
        EDDPipeline.__init__(self, ip, port, _DEFAULT_CONFIG)
        self._dada_buffers = []
        self.mkrec_cmd = []


    def setup_sensors(self):
        """
        @brief Setup monitoring sensors
        """
        EDDPipeline.setup_sensors(self)

        self._integration_time_status = Sensor.float(
            "block-length-time",
            description="Length of a processing block [s]",
            initial_status=Sensor.UNKNOWN)
        self.add_sensor(self._integration_time_status)

        self._output_rate_status = Sensor.float(
            "output-rate",
            description="Output data rate [Gbyte/s]",
            initial_status=Sensor.UNKNOWN)
        self.add_sensor(self._output_rate_status)

        self._polarization_sensors = {}


    def add_input_stream_sensor(self, streamid):
        """
        @brief add sensors for i/o buffers for an input stream with given streamid.
        """
        self._polarization_sensors[streamid] = {}
        self._polarization_sensors[streamid]["mkrecv_sensors"] = MkrecvSensors(streamid)
        for s in self._polarization_sensors[streamid]["mkrecv_sensors"].sensors.values():
            self.add_sensor(s)
        self._polarization_sensors[streamid]["input-buffer-fill-level"] = Sensor.float(
                "input-buffer-fill-level-{}".format(streamid),
                description="Fill level of the input buffer for polarization{}".format(streamid),
                params=[0, 1])
        self.add_sensor(self._polarization_sensors[streamid]["input-buffer-fill-level"])
        self._polarization_sensors[streamid]["input-buffer-total-write"] = Sensor.float(
                "input-buffer-total-write-{}".format(streamid),
                description="Total write into input buffer for polarization {}".format(streamid),
                params=[0, 1])

        self.add_sensor(self._polarization_sensors[streamid]["input-buffer-total-write"])
        self._polarization_sensors[streamid]["output-buffer-fill-level"] = Sensor.float(
                "output-buffer-fill-level-{}".format(streamid),
                description="Fill level of the output buffer for polarization {}".format(streamid)
                )
        self._polarization_sensors[streamid]["output-buffer-total-read"] = Sensor.float(
                "output-buffer-total-read-{}".format(streamid),
                description="Total read from output buffer for polarization {}".format(streamid)
                )
        self.add_sensor(self._polarization_sensors[streamid]["output-buffer-total-read"])
        self.add_sensor(self._polarization_sensors[streamid]["output-buffer-fill-level"])


    @coroutine
    def _create_ring_buffer(self, bufferSize, blocks, key, numa_node):
         """
         @brief Create a ring buffer of given size with given key on specified numa node.
                Adds and register an appropriate sensor to thw list
         """
         # always clear buffer first. Allow fail here
         yield command_watcher("dada_db -d -k {key}".format(key=key), allow_fail=True)

         cmd = "numactl --cpubind={numa_node} --membind={numa_node} dada_db -k {key} -n {blocks} -b {bufferSize} -p -l".format(key=key, blocks=blocks, bufferSize=bufferSize, numa_node=numa_node)
         log.debug("Running command: {0}".format(cmd))
         yield command_watcher(cmd)

         M = DbMonitor(key, self._buffer_status_handle)
         M.start()
         self._dada_buffers.append({'key': key, 'monitor': M})


    def _buffer_status_handle(self, status):
        """
        @brief Process a change in the buffer status
        """
        for streamid, stream_description in self._config["input_data_streams"].items():
            if status['key'] == stream_description['dada_key']:
                self._polarization_sensors[streamid]["input-buffer-total-write"].set_value(status['written'])
                self._polarization_sensors[streamid]["input-buffer-fill-level"].set_value(status['fraction-full'])
        for streamid, stream_description in self._config["input_data_streams"].items():
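            # output buffers use the reversed input dada key as their key
            # (ofname = bufferName[::-1] in configure())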
            if status['key'] == stream_description['dada_key'][::-1]:
                self._polarization_sensors[streamid]["output-buffer-fill-level"].set_value(status['fraction-full'])
                self._polarization_sensors[streamid]["output-buffer-total-read"].set_value(status['read'])


    @state_change(target="configured", allowed=["idle"], intermediate="configuring")
    @coroutine
    def configure(self, config_json):
        """
        Configure the EDD VLBI pipeline

        Args:
            config_json    A JSON dictionary object containing configuration information
        """
        log.info("Configuring EDD backend for processing")
        log.debug("Configuration string: '{}'".format(config_json))

        yield self.set(config_json)

        cfs = json.dumps(self._config, indent=4)
        log.info("Final configuration:\n" + cfs)



        self.__numa_node_pool = []
        # remove numa nodes with missing capabilities
        for node in numa.getInfo():
            if len(numa.getInfo()[node]['gpus']) < 1:
                log.debug("Not enough gpus on numa node {} - removing from pool.".format(node))
                continue
            elif len(numa.getInfo()[node]['net_devices']) < 1:
                log.debug("Not enough nics on numa node {} - removing from pool.".format(node))
                continue
            else:
                self.__numa_node_pool.append(node)

        log.debug("{} numa nodes remaining in pool after cosntraints.".format(len(self.__numa_node_pool)))

        if len(self._config['input_data_streams']) > len(self.__numa_node_pool):
            raise FailReply("Not enough numa nodes to process {} polarizations!".format(len(self._config['input_data_streams'])))

        self._subprocessMonitor = SubprocessMonitor()
        #ToDo: Check that all input data streams have the same format, or allow different formats
        for i, streamid in enumerate(self._config['input_data_streams']):
            # calculate input buffer parameters
            stream_description = self._config['input_data_streams'][streamid]
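            # fixed PSRDADA shared-memory keys; this limits the pipeline to
            # two input streams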
            stream_description["dada_key"] = ["dada", "dadc"][i]
            self.add_input_stream_sensor(streamid)
            self.input_heapSize = stream_description["samples_per_heap"] * stream_description['bit_depth'] // 8

            nHeaps = self._config["samples_per_block"] // stream_description["samples_per_heap"]
            input_bufferSize = nHeaps * self.input_heapSize
            log.info('Input dada parameters created from configuration:\n\
                    heap size:        {} byte\n\
                    heaps per block:  {}\n\
                    buffer size:      {} byte'.format(self.input_heapSize, nHeaps, input_bufferSize))


            final_payloads, final_fpss, final_framens = EDD_VDIF_Frame_Size(stream_description['sample_rate'])

            if self._config['payload_size'] == 'auto':
                payload_size = final_payloads[-1]
            else:
                payload_size = int(self._config['payload_size'])

            log.info('Possible frame payload sizes (add 32 for framesize):')
            for k in range(final_payloads.size):
                if payload_size == final_payloads[k]:
                    M = "*"
                else:
                    M = " "
                log.info(' {}{:5.0f} byte  {:8.0f} frames per sec  {:6.3f} nsec/frame'.format(M, final_payloads[k], final_fpss[k], final_framens[k]))

            if payload_size not in final_payloads:
                log.warning("Payload size {} possibly not conform with VDIF format!".format(payload_size))

            # calculate output buffer parameters
            size_of_samples = ceil(1. * self._config["samples_per_block"] * 2 / 8.) # byte for two bit mode
            number_of_packages = ceil(size_of_samples / float(payload_size))

            output_buffer_size = number_of_packages * (payload_size + self._config['vdif_header_size'])

            integration_time = self._config["samples_per_block"] / float(stream_description["sample_rate"])
            self._integration_time_status.set_value(integration_time)

            rate = output_buffer_size / integration_time  # in spead documentation BYTE per second and not bit!
            rate *= self._config["output_rate_factor"]  # set rate to (100+X)% of expected rate
            self._output_rate_status.set_value(rate / 1E9)

            log.info('Output parameters calculated from configuration:\n\
                total size of data samples:        {} byte\n\
                number_of_packages:  {}\n\
                size of output buffer:      {} byte\n\
                rate ({:.0f}%):        {} Gbyte/s'.format(size_of_samples,
                    number_of_packages, output_buffer_size,
                    self._config["output_rate_factor"]*100, rate / 1E9))

            numa_node = self.__numa_node_pool[i]
            log.debug("Associating {} with numa node {}".format(streamid, numa_node))

            # configure dada buffer
            bufferName = stream_description['dada_key']
            yield self._create_ring_buffer(input_bufferSize, 64, bufferName, numa_node)

            ofname = bufferName[::-1]
            # we write nSlice blocks on each go
            yield self._create_ring_buffer(output_buffer_size, 8, ofname, numa_node)

            # Configure + launch 
            physcpu = numa.getInfo()[numa_node]['cores'][0]
            thread_id = self._config['thread_id'][streamid]
            station_id = self._config['station_id'][streamid]
            cmd = "taskset -c {physcpu} VLBI --input_key={dada_key} --speadheap_size={heapSize} --thread_id={thread_id} --station_id={station_id} --payload_size={payload_size} --sample_rate={sample_rate} --nbits={bit_depth} -o {ofname} --log_level={log_level} --output_type=dada".format(ofname=ofname, heapSize=self.input_heapSize, numa_node=numa_node, physcpu=physcpu, thread_id=thread_id, station_id=station_id, payload_size=payload_size, log_level=self._config['log_level'], **stream_description)
            log.debug("Command to run: {}".format(cmd))

            cudaDevice = numa.getInfo()[numa_node]['gpus'][0]
            cli = ManagedProcess(cmd, env={"CUDA_VISIBLE_DEVICES": cudaDevice})
            self._subprocessMonitor.add(cli, self._subprocess_error)
            self._subprocesses.append(cli)

            cfg = self._config.copy()
            cfg.update(stream_description)

            ip_range = []
            port = set()
            for key in self._config["output_data_streams"]:
                if streamid in key:
                    ip_range.append(self._config["output_data_streams"][key]['ip'])
                    port.add(self._config["output_data_streams"][key]['port'])
            if len(port)!=1:
                raise FailReply("Output data for one plarization has to be on the same port! ")

            if self._config["output_type"] == 'network':
                physcpu = ",".join(numa.getInfo()[numa_node]['cores'][1:2])
                fastest_nic, nic_params = numa.getFastestNic(numa_node)
                log.info("Sending data for {} on NIC {} [ {} ] @ {} Mbit/s".format(streamid, fastest_nic, nic_params['ip'], nic_params['speed']))

                cmd = "taskset -c {physcpu} vdif_send --input_key {ofname} --if_ip {ibv_if} --dest_ip {mcast_dest} --port {port_tx} --max_rate {rate}".format(ofname=ofname, 
                        physcpu=physcpu, ibv_if=nic_params['ip'], mcast_dest=" ".join(ip_range), port_tx=port.pop(), rate=rate)
                log.debug("Command to run: {}".format(cmd))

            elif self._config["output_type"] == 'disk':
                ofpath = os.path.join(cfg["output_directory"], ofname)
                log.debug("Writing output to {}".format(ofpath))
                if not os.path.isdir(ofpath):
                    os.makedirs(ofpath)
                cmd = "dada_dbdisk -k {ofname} -D {ofpath} -W".format(ofname=ofname, ofpath=ofpath, **cfg)
            else:
                log.warning("Selected null output. Not sending data!")
                cmd = "dada_dbnull -z -k {}".format(ofname)

            log.debug("Command to run: {}".format(cmd))
            mks = ManagedProcess(cmd, env={"CUDA_VISIBLE_DEVICES": cudaDevice})
            self._subprocessMonitor.add(mks, self._subprocess_error)
            self._subprocesses.append(mks)

        self._subprocessMonitor.start()


    @state_change(target="streaming", allowed=["configured"], intermediate="capture_starting")
    @coroutine
    def capture_start(self, config_json=""):
        """
        @brief start streaming output
        """
        log.info("Starting EDD backend")
        try:
            for i, streamid in enumerate(self._config['input_data_streams']):
                stream_description = self._config['input_data_streams'][streamid]
                mkrecvheader_file = tempfile.NamedTemporaryFile(mode="w", delete=False)
                log.debug("Creating mkrec header file: {}".format(mkrecvheader_file.name))
                mkrecvheader_file.write(_mkrecv_header)
                # DADA may need this
                # ToDo: Check for input stream definitions
                mkrecvheader_file.write("NBIT {}\n".format(stream_description["bit_depth"]))
                mkrecvheader_file.write("HEAP_SIZE {}\n".format(self.input_heapSize))

                mkrecvheader_file.write("\n#OTHER PARAMETERS\n")
                mkrecvheader_file.write("samples_per_block {}\n".format(self._config["samples_per_block"]))

                mkrecvheader_file.write("\n#PARAMETERS ADDED AUTOMATICALLY BY MKRECV\n")
                mkrecvheader_file.close()

                cfg = self._config.copy()
                cfg.update(stream_description)
                if not self._config['dummy_input']:
                    numa_node = self.__numa_node_pool[i]
                    fastest_nic, nic_params = numa.getFastestNic(numa_node)
                    log.info("Receiving data for {} on NIC {} [ {} ] @ {} Mbit/s".format(streamid, fastest_nic, nic_params['ip'], nic_params['speed']))
                    physcpu = ",".join(numa.getInfo()[numa_node]['cores'][2:7])
                    if self._config['idx1_modulo'] == 'auto':
                        idx1modulo = 10 * cfg["samples_per_block"] // stream_description["samples_per_heap"]
                    else:
                        idx1modulo = self._config['idx1_modulo']

                    cmd = "taskset -c {physcpu} mkrecv_v4 --quiet --header {mkrecv_header} --idx1-step {samples_per_heap} --heap-size {input_heap_size} --idx1-modulo {idx1modulo} \
                    --dada-key {dada_key} --sync-epoch {sync_time} --sample-clock {sample_rate} \
                    --ibv-if {ibv_if} --port {port} {ip}".format(mkrecv_header=mkrecvheader_file.name, physcpu=physcpu,ibv_if=nic_params['ip'], input_heap_size=self.input_heapSize, idx1modulo=idx1modulo,
                            **cfg )
                    mk = ManagedProcess(cmd, stdout_handler=self._polarization_sensors[streamid]["mkrecv_sensors"].stdout_handler)
                else:
                    log.warning("Creating Dummy input instead of listening to network!")
                    cmd = "dada_junkdb -c 1 -R 1000 -t 3600 -k {dada_key} {mkrecv_header}".format(mkrecv_header=mkrecvheader_file.name,
                            **cfg )

                    mk = ManagedProcess(cmd)

                self.mkrec_cmd.append(mk)
                self._subprocessMonitor.add(mk, self._subprocess_error)

        except Exception as E:
            log.error("Error starting pipeline: {}".format(E))
            raise E
        else:
            self.__watchdogs = []
            for i, streamid in enumerate(self._config['input_data_streams']):
                wd = SensorWatchdog(self._polarization_sensors[streamid]["input-buffer-total-write"],
                        10 * self._integration_time_status.value(),
                        self.watchdog_error)
                wd.start()
                self.__watchdogs.append(wd)


    @state_change(target="idle", allowed=["streaming"], intermediate="capture_stopping")
    @coroutine
    def capture_stop(self):
        """
        @brief Stop streaming of data
        """
        log.info("Stoping EDD backend")
        for wd in self.__watchdogs:
            wd.stop_event.set()
            yield
        if self._subprocessMonitor is not None:
            self._subprocessMonitor.stop()
            yield

        # stop mkrec process
        log.debug("Stopping mkrecv processes ...")
        for proc in self.mkrec_cmd:
            proc.terminate()
            yield
        # This will also terminate the gated spectrometer automatically

        yield self.deconfigure()


    @state_change(target="idle", intermediate="deconfiguring", error='panic')
    @coroutine
    def deconfigure(self):
        """
        @brief deconfigure the pipeline.
        """
        log.info("Deconfiguring EDD backend")
        if self.previous_state == 'streaming':
            yield self.capture_stop()

        if self._subprocessMonitor is not None:
            yield self._subprocessMonitor.stop()
        for proc in self._subprocesses:
            yield proc.terminate()

        self.mkrec_cmd = []

        log.debug("Destroying dada buffers")
        for k in self._dada_buffers:
            k['monitor'].stop()
            cmd = "dada_db -d -k {0}".format(k['key'])
            log.debug("Running command: {0}".format(cmd))
            yield command_watcher(cmd)

        self._dada_buffers = []

    @coroutine
    def populate_data_store(self, host, port):
        """@brief Populate the data store"""
        log.debug("Populate data store @ {}:{}".format(host, port))
        dataStore =  EDDDataStore(host, port)
        log.debug("Adding output formats to known data formats")

        descr = {"description":"VDIF data stream",
                "ip": None,
                "port": None,
                }
        dataStore.addDataFormatDefinition("VDIF:1", descr)
Example #3
class EddPulsarPipeline(EDDPipeline):
    """
    @brief Interface object which accepts KATCP commands

    """
    VERSION_INFO = ("mpikat-edd-api", 0, 1)
    BUILD_INFO = ("mpikat-edd-implementation", 0, 1, "rc1")

    def __init__(self, ip, port):
        """@brief initialize the pipeline."""
        EDDPipeline.__init__(self, ip, port, DEFAULT_CONFIG)
        self.mkrec_cmd = []
        self._dada_buffers = ["dada", "dadc"]
        self._dada_buffers_monitor = []
        self._data_processing_proc = None
        self._mkrecv_ingest_proc = None
        self._archive_directory_monitor = None

        # Pick first available numa node. Disable non-available nodes via
        # EDD_ALLOWED_NUMA_NODES environment variable
        self.numa_number = list(numa.getInfo().keys())[0]

    def setup_sensors(self):
        """
        @brief Setup monitoring sensors
        """
        EDDPipeline.setup_sensors(self)
        self._tscrunch = Sensor.string("tscrunch_PNG",
                                       description="tscrunch png",
                                       default=BLANK_IMAGE,
                                       initial_status=Sensor.UNKNOWN)
        self.add_sensor(self._tscrunch)

        self._fscrunch = Sensor.string("fscrunch_PNG",
                                       description="fscrunch png",
                                       default=BLANK_IMAGE,
                                       initial_status=Sensor.UNKNOWN)
        self.add_sensor(self._fscrunch)

        self._profile = Sensor.string("profile_PNG",
                                      description="pulse profile png",
                                      default=BLANK_IMAGE,
                                      initial_status=Sensor.UNKNOWN)
        self.add_sensor(self._profile)

        self._central_freq = Sensor.string("_central_freq",
                                           description="_central_freq",
                                           default="N/A",
                                           initial_status=Sensor.UNKNOWN)
        self.add_sensor(self._central_freq)

        self._source_name_sensor = Sensor.string("target_name",
                                                 description="target name",
                                                 default="N/A",
                                                 initial_status=Sensor.UNKNOWN)
        self.add_sensor(self._source_name_sensor)

        self._nchannels = Sensor.string("_nchannels",
                                        description="_nchannels",
                                        default="N/A",
                                        initial_status=Sensor.UNKNOWN)
        self.add_sensor(self._nchannels)

        self._nbins = Sensor.string("_nbins",
                                    description="_nbins",
                                    default="N/A",
                                    initial_status=Sensor.UNKNOWN)
        self.add_sensor(self._nbins)

        self._time_processed = Sensor.string("_time_processed",
                                             description="_time_processed",
                                             default=0,
                                             initial_status=Sensor.UNKNOWN)
        self.add_sensor(self._time_processed)

        self._dm_sensor = Sensor.string("_source_dm",
                                        description="_source_dm",
                                        default=0,
                                        initial_status=Sensor.UNKNOWN)
        self.add_sensor(self._dm_sensor)

        self._par_dict_sensor = Sensor.string("_par_dict_sensor",
                                              description="_par_dict_sensor",
                                              default="N/A",
                                              initial_status=Sensor.UNKNOWN)
        self.add_sensor(self._par_dict_sensor)
        self._directory_size_sensor = Sensor.string(
            "_directory_size_sensor",
            description="_directory_size_sensor",
            default=0,
            initial_status=Sensor.UNKNOWN)
        self.add_sensor(self._directory_size_sensor)
        self._input_buffer_fill_level = Sensor.float(
            "input-buffer-fill-level",
            description="Fill level of the input buffer",
            params=[0, 1])
        self.add_sensor(self._input_buffer_fill_level)

        self._input_buffer_total_write = Sensor.float(
            "input-buffer-total-write",
            description="Total write into input buffer ",
            params=[0, 1])
        self.add_sensor(self._input_buffer_total_write)

        self._output_buffer_fill_level = Sensor.float(
            "output-buffer-fill-level",
            description="Fill level of the output buffer")
        self.add_sensor(self._output_buffer_fill_level)
        self._output_buffer_total_read = Sensor.float(
            "output-buffer-total-read",
            description="Total read from output buffer")
        self.add_sensor(self._output_buffer_total_read)

        self._polarization_sensors = {}

    @coroutine
    def _png_monitor(self):
        try:
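            # one dspsr archive (*.ar) is assumed to cover 10 s of data,
            # hence the hard-coded factor 10 below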
            processed_seconds = int(
                os.popen("ls {}/*ar | wc -l".format(self.in_path)).read())
            self._time_processed.set_value("{} s".format(processed_seconds *
                                                         10))
            log.info("processed {}s".format(processed_seconds * 10))
        except Exception as error:
            log.debug(error)
        log.info("reading png from : {}".format(self.out_path))
        try:
            log.debug("reading {}/fscrunch.png".format(self.out_path))
            with open("{}/fscrunch.png".format(self.out_path),
                      "rb") as imageFile:
                image_fscrunch = base64.b64encode(imageFile.read())
                self._fscrunch.set_value(image_fscrunch)
        except Exception as error:
            log.debug(error)
        try:
            log.debug("reading {}/tscrunch.png".format(self.out_path))
            with open("{}/tscrunch.png".format(self.out_path),
                      "rb") as imageFile:
                image_tscrunch = base64.b64encode(imageFile.read())
                self._tscrunch.set_value(image_tscrunch)
        except Exception as error:
            log.debug(error)
        try:
            log.debug("reading {}/profile.png".format(self.out_path))
            with open("{}/profile.png".format(self.out_path),
                      "rb") as imageFile:
                image_profile = base64.b64encode(imageFile.read())
                self._profile.set_value(image_profile)
        except Exception as error:
            log.debug(error)
        return

    @coroutine
    def _folder_size_monitor(self):
        try:
            total_size = 0
            for root, dirs, files in os.walk("{}".format(self.in_path)):
                for f in files:
                    total_size += os.path.getsize(os.path.join(root, f))
            total_size_in_gb = total_size / 1024 / 1024 / 1024.
            self._directory_size_sensor.set_value("{} GB".format(
                int(total_size_in_gb)))
            log.info("Directory size = {} GB".format(total_size_in_gb))
        except Exception as error:
            log.debug(error)
        return

    def add_input_stream_sensor(self, streamid):
        """
        Add sensors for i/o buffers for an input stream with given streamid.
        """
        self._polarization_sensors[streamid] = {}
        self._polarization_sensors[streamid]["mkrecv_sensors"] = MkrecvSensors(
            streamid)
        for s in self._polarization_sensors[streamid][
                "mkrecv_sensors"].sensors.values():
            self.add_sensor(s)

    @coroutine
    def _create_ring_buffer(self, key, numa_node):
        """
        @brief Create a ring buffer of given size with given key on specified numa node.
               Adds and register an appropriate sensor to thw list
        """
        # always clear buffer first. Allow fail here
        yield command_watcher("taskset -c {cpus} dada_db -d -k {key}".format(
            key=key, cpus=self._config["buffer_core"][str(numa_node)]),
                              allow_fail=True)

        cmd = "numactl --cpubind={numa_node} --membind={numa_node} --physcpubind={cpus} dada_db -k {key} -n {blocks} -b {bufferSize} -p -l".format(
            key=key,
            cpus=self._config["buffer_core"][str(numa_node)],
            blocks=self._config[key]["number"],
            bufferSize=self._config[key]["size"],
            numa_node=numa_node)
        log.debug("Running command: {0}".format(cmd))
        yield command_watcher(cmd)
        M = DbMonitor(key, self._buffer_status_handle)
        M.start()
        self._dada_buffers_monitor.append({'key': key, 'monitor': M})

    def _buffer_status_handle(self, status):
        """
        Process a change in the buffer status
        """
        if status['key'] == "dada":
            self._input_buffer_total_write.set_value(status['written'])
            self._input_buffer_fill_level.set_value(status['fraction-full'])

        elif status['key'] == "dadc":
            self._output_buffer_fill_level.set_value(status['fraction-full'])
            self._output_buffer_total_read.set_value(status['read'])

    @coroutine
    def _reset_ring_buffer(self, key, numa_node):
        """
        @brief Create a ring buffer of given size with given key on specified numa node.
               Adds and register an appropriate sensor to thw list
        """
        # always clear buffer first. Allow fail here
        cmd = "numactl --cpubind={numa_node} --membind={numa_node} dbreset -k {key} --log_level debug".format(
            numa_node=numa_node, key=key)
        log.debug("Running command: {0}".format(cmd))
        yield command_watcher(cmd, allow_fail=True)

    @state_change(target="configured",
                  allowed=["idle"],
                  intermediate="configuring")
    @coroutine
    def configure(self, config_json):
        """
        @brief Configure the pipeline with the given config_json
        """
        log.info("Configuring EDD backend for processing")
        log.info("Configuration string: '{}'".format(config_json))
        yield self.set(config_json)

        if isinstance(self._config['input_data_streams'], dict):
            log.warning(
                "CHANGING INPUT DATA STREAM TYPE FROM DICT TO LIST - THIS IS A HACK AND SHOULD BE DONE PROPERLY!"
            )
            self._config['input_data_streams'] = list(
                self._config['input_data_streams'].values())
            log.debug(self._config)

        cfs = json.dumps(self._config, indent=4)
        log.info("Final configuration:\n" + cfs)

        self.__coreManager = CoreManager(self.numa_number)
        self.__coreManager.add_task("mkrecv",
                                    self._config["nstreams"] + 1,
                                    prefere_isolated=True)
        self.__coreManager.add_task("dspsr", self._config["dspsr_threads"])
        self.__coreManager.add_task("merge", self._config["merge_threads"])

        # The master controller provides the data store IP as default global
        # config to all pipelines
        self.__eddDataStore = EDDDataStore(self._config["data_store"]["ip"],
                                           self._config["data_store"]["port"])

        self.sync_epoch = self._config['input_data_streams'][0]['sync_time']
        log.debug("sync_epoch = {}".format(self.sync_epoch))
        for key in self._dada_buffers:
            yield self._create_ring_buffer(key, self.numa_number)

        self.tzpar_dir = os.path.join("/mnt/", self._config["tzpar"])
        if not os.path.isdir(self.tzpar_dir):
            log.error("Not a directory {} !".format(self.tzpar_dir))
            raise RuntimeError("tzpar directory is no directory: {}".format(
                self.tzpar_dir))
        if os.path.isfile("/tmp/t2pred.dat"):
            os.remove("/tmp/t2pred.dat")
        self.add_input_stream_sensor("")
        self.mass_inform(Message.inform('interface-changed'))

    @state_change(target="ready",
                  allowed=["configured"],
                  intermediate="capture_starting")
    def capture_start(self):
        log.debug('Received capture start, doing nothing.')

    @state_change(target="ready",
                  allowed=["ready", "measurement_stopping"],
                  intermediate="measurement_preparing")
    @coroutine
    def measurement_prepare(self, config_json):
        """
        @brief Update pipeline configuration
        """
        log.info("Configuration string: '{}'".format(config_json))
        yield self.set(config_json)

    @state_change(target="measuring",
                  allowed=["ready"],
                  intermediate="measurement_starting")
    @coroutine
    def measurement_start(self):
        """
        @brief Create output directory, create mkrecv header, calculate tempo2 predictor file if needed, start data capture, processing, and monitoring
        """
        if self._config["active"] == 0:
            log.info("Pipeline is not active")
            raise StateChange("ready")
        self._timer = Time.now()
        self._subprocessMonitor = SubprocessMonitor()
        try:
            self._png_monitor_callback.stop()
        except Exception as E:
            log.error("Png monitor already stopped.")
            log.exception(E)
        try:
            self._folder_size_monitor_callback.stop()
        except Exception as E:
            log.error("File size monitor already stopped.")
            log.exception(E)

        self._source_name = self.__eddDataStore.getTelescopeDataItem(
            "source-name")
        ra = self.__eddDataStore.getTelescopeDataItem("ra")
        decl = self.__eddDataStore.getTelescopeDataItem("dec")
        scannum = self.__eddDataStore.getTelescopeDataItem("scannum")
        subscannum = self.__eddDataStore.getTelescopeDataItem("subscannum")
        receiver_name = self.__eddDataStore.getTelescopeDataItem("receiver")
        project_id = self.__eddDataStore.getTelescopeDataItem("project")
        log.info(
            "Retrieved data from telescope:\n   Source name: {}\n   RA = {},  decl = {}, receiver = {}"
            .format(self._source_name, ra, decl, receiver_name))

        if self._config["mode"] == "Timing":
            self.tzpar_file = os.path.join(
                self.tzpar_dir,
                '{}.par'.format(self._source_name.split("_")[0][1:]))
            log.debug("Checking parfile file {}".format(self.tzpar_file))
            self.pulsar_flag = is_accessible(self.tzpar_file)
            if (parse_tag(self._source_name) !=
                    "R") and (not self.pulsar_flag):
                log.warning(
                    "source {} is neither pulsar nor calibrator. Will not react until next measurement start"
                    .format(self._source_name))
                raise StateChange("ready")

        log.debug("Setting blank image")
        self._fscrunch.set_value(BLANK_IMAGE)
        self._tscrunch.set_value(BLANK_IMAGE)
        self._profile.set_value(BLANK_IMAGE)

        log.debug("writing mkrecv header")
        self.cuda_number = numa.getInfo()[self.numa_number]['gpus'][0]
        log.info("  - Running on cuda core: {}".format(self.cuda_number))
        header = self._config["dada_header_params"]
        central_freq = header["frequency_mhz"]
        self._central_freq.set_value(str(header["frequency_mhz"]))
        self._source_name_sensor.set_value(self._source_name)
        self._nchannels.set_value(self._config["nchannels"])
        self._nbins.set_value(self._config["nbins"])
        header["telescope"] = self._config["tempo2_telescope_name"]
        log.info("  - Tempo2 telescope name: {}".format(header['telescope']))

        # convert (ra, decl) in degrees to colon-separated sexagesimal
        # strings, e.g. "12h34m56.78s" -> "12:34:56.78"
        c = SkyCoord("{} {}".format(ra, decl), unit=(u.deg, u.deg))
        header["ra"] = c.to_string("hmsdms").split(" ")[0].replace(
            "h", ":").replace("m", ":").replace("s", "")
        header["dec"] = c.to_string("hmsdms").split(" ")[1].replace(
            "d", ":").replace("m", ":").replace("s", "")
        header["key"] = self._dada_buffers[0]
        log.debug("  - Dada key: {}".format(header['key']))
        header["mc_source"] = ""
        for i in self._config['input_data_streams']:
            header["mc_source"] += i["ip"] + ","
        header["mc_source"] = header["mc_source"][:-1]
        log.info("  - mc source: {}".format(header['mc_source']))
        header["mc_streaming_port"] = self._config['input_data_streams'][0][
            "port"]
        log.info("  - mc streaming port: {}".format(
            header['mc_streaming_port']))
        header["interface"] = numa.getFastestNic(self.numa_number)[1]['ip']
        log.info("  - mc interface: {}".format(header['interface']))
        header["sync_time"] = self.sync_epoch
        log.info("  - sync time: {}".format(header['sync_time']))
        if header['sample_clock'] == "unset":
            header["sample_clock"] = float(
                self._config['input_data_streams'][0]["sample_rate"]
            )  # adjustment for the predecimation factor is done in the master controller
        log.info("  - sample_clock: {}".format(header['sample_clock']))
        header["source_name"] = self._source_name
        header["obs_id"] = "{0}_{1}".format(scannum, subscannum)
        header["filesize"] = int(float(self._config["dada"]["size"]))
        log.info("  - obs_id: {}".format(header['obs_id']))
        header["receiver_name"] = receiver_name
        log.info("  - receiver_name: {}".format(header['receiver_name']))
        tstr = Time.now().isot.replace(":", "-")
        tdate = tstr.split("T")[0]

        log.debug("Setting up the input and scrunch data directories")
        if self._config["mode"] == "Timing":
            try:
                self.in_path = os.path.join("/mnt/", project_id,
                                            tdate, self._source_name,
                                            str(central_freq), tstr,
                                            "raw_data")
                self.out_path = os.path.join("/mnt/", project_id, tdate,
                                             self._source_name,
                                             str(central_freq), tstr,
                                             "combined_data")
                log.debug("Creating directories")
                log.info("Data will be written to {}".format(self.in_path))
                log.debug("out path {}".format(self.out_path))
                if not os.path.isdir(self.in_path):
                    os.makedirs(self.in_path)
                if not os.path.isdir(self.out_path):
                    os.makedirs(self.out_path)
                os.chdir(self.in_path)
                log.debug("Change to workdir: {}".format(os.getcwd()))
                log.debug("Current working directory: {}".format(os.getcwd()))
            except Exception as error:
                raise EddPulsarPipelineError(str(error))
        else:
            try:
                self.in_path = os.path.join("/mnt/", project_id,
                                            tdate, self._source_name,
                                            str(central_freq), tstr)
                log.debug("Creating directories")
                log.info("Data will be written to {}".format(self.in_path))
                if not os.path.isdir(self.in_path):
                    os.makedirs(self.in_path)
                os.chdir(self.in_path)
                log.debug("Change to workdir: {}".format(os.getcwd()))
                log.debug("Current working directory: {}".format(os.getcwd()))
            except Exception as error:
                raise EddPulsarPipelineError(str(error))

        os.chdir("/tmp/")
        log.debug("Creating the predictor with tempo2")
        if self._config["mode"] == "Timing":
            if (parse_tag(self._source_name) != "R") and is_accessible(
                    self.tzpar_file):
                cmd = 'numactl -m {} taskset -c {} tempo2 -f {} -pred'.format(
                    self.numa_number, self.__coreManager.get_coresstr('dspsr'),
                    self.tzpar_file).split()
                cmd.append("{} {} {} {} {} {} {} 3599.999999999".format(
                    self._config["tempo2_telescope_name"],
                    Time.now().mjd - 0.5,
                    Time.now().mjd + 0.5,
                    float(central_freq) - (float(header["bandwidth"]) / 2),
                    float(central_freq) + (float(header["bandwidth"]) / 2),
                    self._config["tempo2_ntimecoeff"],
                    self._config["tempo2_nfreqcoeff"]))
                log.info("Command to run: {}".format(cmd))
                yield command_watcher(cmd, allow_fail=True)
                attempts = 0
                retries = 5
                while True:
                    if attempts >= retries:
                        error = "Could not read t2pred.dat"
                        log.warning(
                            "{}. Will not react until next measurement start".
                            format(error))
                        raise StateChange("ready")
                    else:
                        yield sleep(1)
                        if is_accessible('{}/t2pred.dat'.format(os.getcwd())):
                            log.debug('found {}/t2pred.dat'.format(
                                os.getcwd()))
                            break
                        else:
                            attempts += 1

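        # Write the DADA header (passed to mkrecv below) and the DADA key file
        # (passed to the processing command) to temporary files.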
        self.dada_header_file = tempfile.NamedTemporaryFile(
            mode="w",
            prefix="edd_dada_header_",
            suffix=".txt",
            dir="/tmp/",
            delete=False)
        log.debug("Writing dada header file to {0}".format(
            self.dada_header_file.name))
        header_string = render_dada_header(header)
        self.dada_header_file.write(header_string)
        self.dada_key_file = tempfile.NamedTemporaryFile(
            mode="w",
            prefix="dada_keyfile_",
            suffix=".key",
            dir="/tmp/",
            delete=False)
        log.debug("Writing dada key file to {0}".format(
            self.dada_key_file.name))
        key_string = make_dada_key_string(self._dada_buffers[1])
        self.dada_key_file.write(key_string)
        log.debug("Dada key file contains:\n{0}".format(key_string))
        self.dada_header_file.close()
        self.dada_key_file.close()

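        # Same polling pattern as for the predictor: wait up to `retries`
        # seconds for the key file to appear on disk.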
        attempts = 0
        retries = 5
        while True:
            if attempts >= retries:
                error = "could not read dada_key_file"
                log.warning(
                    "{}. Will not react until next measurement start".format(
                        error))
                raise StateChange("ready")
            else:
                yield sleep(1)
                if is_accessible('{}'.format(self.dada_key_file.name)):
                    log.debug('found {}'.format(self.dada_key_file.name))
                    break
                else:
                    attempts += 1
        ## Setting DM value for filterbank recording
        self.par_dict = {}
        self.dm = self._config["cdd_dm"]
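        # Read the DM from the pulsar's par file if available; par-file lines
        # carry 2-4 whitespace-separated columns (name, value, optional fit
        # flag and uncertainty), of which only name and value are kept here.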
        try:
            with open(
                    os.path.join(
                        self.tzpar_dir, '{}.par'.format(
                            self._source_name.split("_")[0][1:]))) as fh:
                for line in fh:
                    tokens = line.strip().split()
                    if len(tokens) == 2:
                        key, value = tokens
                    elif len(tokens) == 3:
                        key, value, error = tokens
                    elif len(tokens) == 4:
                        key, value, lock, error = tokens
                    else:
                        continue
                    self.par_dict[key] = value.strip()
        except IOError as error:
            log.error(error)
        try:
            self.dm = float(self.par_dict["DM"])
        except KeyError as error:
            log.info("Key {} not found, will use default value of {}".format(
                error, self.dm))
        if parse_tag(self._source_name) == "R":
            log.info("This is a calibrator scan, will set dm to zero")
            self.dm = 0
        self._dm_sensor.set_value(self.dm)
        self._par_dict_sensor.set_value(
            json.dumps(self.par_dict).strip("{").strip("}").replace(",", "\n"))
        os.chdir(self.in_path)
        log.debug("source_name = {}".format(self._source_name))
        if self._config["mode"] == "Timing":
            if (parse_tag(self._source_name) != "R") and self.pulsar_flag:
                cmd = "numactl -m {numa} dspsr {args} {intergration_time} {nchan} {nbin} -fft-bench -x {fft_length} -cpu {cpus} -cuda {cuda_number} -P {predictor} -N {name} -E {parfile} {keyfile}".format(
                    numa=self.numa_number,
                    fft_length=self._config["fft_length"],
                    args=self._config["dspsr_params"]["args"],
                    integration_time="-L {}".format(
                        self._config["intergration_time"]),
                    nchan="-F {}:D".format(self._config["nchannels"]),
                    nbin="-b {}".format(self._config["nbins"]),
                    name=self._source_name.split("_")[0],
                    predictor="/tmp/t2pred.dat",
                    parfile=self.tzpar_file,
                    cpus=self.__coreManager.get_coresstr('dspsr'),
                    cuda_number=self.cuda_number,
                    keyfile=self.dada_key_file.name)

            elif parse_tag(self._source_name) == "R":
                cmd = "numactl -m {numa} dspsr {args} {intergration_time} -c {period} -D 0.0001 -fft-bench -x {fft_length} {nchan} -cpu {cpus} -N {name} -cuda {cuda_number} {keyfile}".format(
                    numa=self.numa_number,
                    args=self._config["dspsr_params"]["args"],
                    integration_time="-L {}".format(
                        self._config["intergration_time"]),
                    period=self._config["cal_period"],
                    fft_length=self._config["fft_length"],
                    nchan="-F {}:D".format(self._config["nchannels"]),
                    name=self._source_name,
                    cpus=self.__coreManager.get_coresstr('dspsr'),
                    cuda_number=self.cuda_number,
                    keyfile=self.dada_key_file.name)
            else:
                error = "source is unknown"
                raise EddPulsarPipelineError(error)

        if self._config["mode"] == "Searching":
            if self._config["file_length"] == "":
                file_length = ""
                filename = "-o {}_{}_{}.fits".format(self._source_name,
                                                     self.dm,
                                                     self._config["npol"])
            else:
                file_length = "-L {}".format(self._config["file_length"])
                filename = ""

            cmd = "numactl -m {numa} digifits {args} -b 8 -F {nchan}:D -D {DM} -t {tsamp} -nsblk {nsblk} {file_length} -p {npol} -f {decimation} -do_dedisp -x {fft_length} -cpu {cpus} -cuda {cuda_number} {filename} {keyfile}".format(
                numa=self.numa_number,
                npol=self._config["npol"],
                args=self._config["digifits_params"]["args"],
                DM=self.dm,
                nchan=self._config["filterbank_nchannels"],
                fft_length=self._config["fft_length"],
                decimation=self._config["decimation"],
                nsblk=self._config["nsblk"],
                tsamp=self._config["tsamp"],
                file_length=file_length,
                filename=filename,
                cpus=self.__coreManager.get_coresstr('dspsr'),
                cuda_number=self.cuda_number,
                keyfile=self.dada_key_file.name)

        if self._config["mode"] == "Baseband":
            cmd = "numactl -m {numa} taskset -c {cpus} dbdisk_multithread -n {thread} -k dadc".format(
                numa=self.numa_number,
                in_path=self.in_path,
                thread=self._config["dbdisk_writing_threads"],
                cpus=self.__coreManager.get_coresstr('dspsr'))

        if self._config["mode"] == "Leap_baseband":
            cmd = "numactl -m {numa} taskset -c {cpus} dbdiskleap -n 8".format(
                numa=self.numa_number,
                cpus=self.__coreManager.get_coresstr('dspsr'))

        log.debug("Running command: {0}".format(cmd))
        if self._config["mode"] == "Timing":
            log.info("Staring dspsr")
        if self._config["mode"] == "Searching":
            log.info("Staring digifits")
        if self._config["mode"] == "Baseband":
            log.info("Staring dbdisk_multithread")
        if self._config["mode"] == "Leap_baseband":
            log.info("Staring dbdiskleap")
        self._data_processing_proc = ManagedProcess(cmd)
        self._subprocessMonitor.add(self._data_processing_proc,
                                    self._subprocess_error)

        ####################################################
        # STARTING merging code                            #
        ####################################################
        if self._config["mode"] not in "Leap_baseband":
            cmd = "numactl -m {numa} taskset -c {cpu} {merge_application} -p {npart} -n {nthreads} --log_level=info".format(
                numa=self.numa_number,
                cpu=self.__coreManager.get_coresstr('merge'),
                nthreads=self._config["merge_threads"],
                merge_application=self._config["merge_application"],
                npart=self._config["npart"])
            log.debug("Running command: {0}".format(cmd))
            log.info("Staring EDDPolnMerge")
            self._merge_proc = ManagedProcess(cmd)
            self._subprocessMonitor.add(self._merge_proc,
                                        self._subprocess_error)

        ####################################################
        # STARTING MKRECV                                  #
        ####################################################
        cmd = "numactl -m {numa} taskset -c {cpu} mkrecv_v4 --header {dada_header} --nthreads {threads} --lst --quiet".format(
            numa=self.numa_number,
            cpu=self.__coreManager.get_coresstr('mkrecv'),
            threads=self._config["nstreams"],
            dada_header=self.dada_header_file.name)
        log.debug("Running command: {0}".format(cmd))
        log.info("Staring MKRECV")
        self._mkrecv_ingest_proc = ManagedProcess(
            cmd,
            stdout_handler=self._polarization_sensors[""]
            ["mkrecv_sensors"].stdout_handler)
        self._subprocessMonitor.add(self._mkrecv_ingest_proc,
                                    self._subprocess_error)

        ####################################################
        # STARTING ARCHIVE MONITOR                         #
        ####################################################
        if self._config["mode"] == "Timing":
            log.info("Staring archive monitor")
            self._archive_observer = Observer()
            self._archive_observer.daemon = False
            log.info("Input directory: {}".format(self.in_path))
            log.info("Output directory: {}".format(self.out_path))
            log.info("Setting up ArchiveAdder handler")
            self._handler = ArchiveAdder(self.out_path,
                                         self._config["zaplist"])
            self._archive_observer.schedule(self._handler,
                                            str(self.in_path),
                                            recursive=False)
            log.info("Starting directory monitor")
            self._archive_observer.start()
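            # poll for new diagnostic plots every 5 s on the tornado IOLoop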
            self._png_monitor_callback = tornado.ioloop.PeriodicCallback(
                self._png_monitor, 5000)
            self._png_monitor_callback.start()
        else:
            self._folder_size_monitor_callback = tornado.ioloop.PeriodicCallback(
                self._folder_size_monitor, 5000)
            self._folder_size_monitor_callback.start()
        self._subprocessMonitor.start()
        self._timer = Time.now() - self._timer
        log.info("Took {} s to start".format(self._timer * 86400))

    @state_change(target="ready",
                  allowed=["measuring", "error"],
                  intermediate="measurement_stopping")
    @coroutine
    def measurement_stop(self):
        """@brief stop mkrecv, merging application, processing instances, reset DADA buffers."""
        self._timer = Time.now()
        if self._subprocessMonitor is not None:
            self._subprocessMonitor.stop()
        if self._config["mode"] == "Timing":
            try:
                self._archive_observer.stop()
            except Exception as E:
                log.error("Error stopping _archive_observer: {}".format(E))
            try:
                self._png_monitor_callback.stop()
            except Exception as E:
                log.error("Error stopping _png_monitor_callback: {}".format(E))
        else:
            try:
                self._folder_size_monitor_callback.stop()
            except Exception as E:
                log.error(
                    "Error stopping _folder_size_monitor_callback: {}".format(
                        E))
        if self._config["mode"] not in "Leap_baseband":
            process = [self._mkrecv_ingest_proc, self._merge_proc]
            for proc in process:
                proc.terminate(timeout=1)
        else:
            self._mkrecv_ingest_proc.terminate(timeout=1)
        if os.path.isfile("/tmp/t2pred.dat"):
            os.remove("/tmp/t2pred.dat")
        if os.path.isfile("./core"):
            os.remove("./core")
        log.info("reset DADA buffer")
        for k in self._dada_buffers_monitor:
            log.debug("Stopping DADA buffer monitor")
            k['monitor'].stop()
        self._dada_buffers_monitor = []
        for key in self._dada_buffers:
            yield self._create_ring_buffer(key, self.numa_number)
        del self._subprocessMonitor
        self._timer = Time.now() - self._timer
        log.info("Took {} s to stop".format(self._timer * 86400))

    @state_change(target="idle", intermediate="deconfiguring", error='panic')
    @coroutine
    def deconfigure(self):
        """@brief deconfigure the pipeline."""
        log.debug("Destroying dada buffers")

        for k in self._dada_buffers_monitor:
            k['monitor'].stop()
            cmd = "dada_db -d -k {0}".format(k['key'])
            log.debug("Running command: {0}".format(cmd))
            yield command_watcher(cmd, allow_fail=True)
        self._dada_buffers_monitor = []


class GatedFullStokesSpectrometerPipeline(EDDPipeline):
    """Full Stokes Spectrometer pipeline."""

    def __init__(self, ip, port):
        """initialize the pipeline."""
        EDDPipeline.__init__(self, ip, port, _DEFAULT_CONFIG)
        self.mkrec_cmd = []
        self._dada_buffers = []
        self.__dada_key = "dada"  # key of inpt buffer, output is inverse

    def setup_sensors(self):
        """
        Setup monitoring sensors
        """
        EDDPipeline.setup_sensors(self)

        self._integration_time_status = Sensor.float(
            "integration-time",
            description="Integration time [s]",
            initial_status=Sensor.UNKNOWN)
        self.add_sensor(self._integration_time_status)

        self._output_rate_status = Sensor.float(
            "output-rate",
            description="Output data rate [Gbyte/s]",
            initial_status=Sensor.UNKNOWN)
        self.add_sensor(self._output_rate_status)

        self._mkrecv_sensors = MkrecvSensors("")

        for s in self._mkrecv_sensors.sensors.values():
            self.add_sensor(s)

        self._input_buffer_fill_level = Sensor.float(
            "input-buffer-fill-level",
            description="Fill level of the input buffer",
            params=[0, 1])
        self.add_sensor(self._input_buffer_fill_level)

        self._input_buffer_total_write = Sensor.float(
            "input-buffer-total-write",
            description="Total write into input buffer ",
            params=[0, 1])
        self.add_sensor(self._input_buffer_total_write)

        self._output_buffer_fill_level = Sensor.float(
            "output-buffer-fill-level",
            description="Fill level of the output buffer")
        self.add_sensor(self._output_buffer_fill_level)
        self._output_buffer_total_read = Sensor.float(
            "output-buffer-total-read",
            description="Total read from output buffer")
        self.add_sensor(self._output_buffer_total_read)

    @coroutine
    def _create_ring_buffer(self, bufferSize, blocks, key, numa_node):
        """
         Create a ring buffer of given size with given key on specified numa node.
         Adds and register an appropriate sensor to thw list
         """
        # always clear buffer first. Allow fail here
        yield command_watcher("dada_db -d -k {key}".format(key=key),
                              allow_fail=True)

        cmd = "numactl --cpubind={numa_node} --membind={numa_node} dada_db -k {key} -n {blocks} -b {bufferSize} -p -l".format(
            key=key, blocks=blocks, bufferSize=bufferSize, numa_node=numa_node)
        log.debug("Running command: {0}".format(cmd))
        yield command_watcher(cmd)

        M = DbMonitor(key, self._buffer_status_handle)
        M.start()
        self._dada_buffers.append({'key': key, 'monitor': M})

    def _buffer_status_handle(self, status):
        """
        Process a change in the buffer status
        """
        if status['key'] == self.__dada_key:
            self._input_buffer_total_write.set_value(status['written'])
            self._input_buffer_fill_level.set_value(status['fraction-full'])

        elif status['key'] == self.__dada_key[::-1]:
            self._output_buffer_fill_level.set_value(status['fraction-full'])
            self._output_buffer_total_read.set_value(status['read'])

    @state_change(target="configured",
                  allowed=["idle"],
                  intermediate="configuring")
    @coroutine
    def configure(self, config_json):
        """
        Configure the EDD gated spectrometer

        Args:
            config_json:    A JSON dictionary object containing configuration information
        """
        log.info("Configuring EDD backend for processing")
        log.debug("Configuration string: '{}'".format(config_json))

        yield self.set(config_json)

        cfs = json.dumps(self._config, indent=4)
        log.info("Final configuration:\n" + cfs)

        self.__numa_node_pool = []
        # remove numa nodes with missing capabilities
        for node in numa.getInfo():
            if len(numa.getInfo()[node]['gpus']) < 1:
                log.debug(
                    "Not enough gpus on numa node {} - removing from pool.".
                    format(node))
                continue
            elif len(numa.getInfo()[node]['net_devices']) < 1:
                log.debug(
                    "Not enough nics on numa node {} - removing from pool.".
                    format(node))
                continue
            else:
                self.__numa_node_pool.append(node)

        log.debug("{} numa nodes remaining in pool after constraints.".format(
            len(self.__numa_node_pool)))

        if len(self.__numa_node_pool) == 0:
            if self._config['nonfatal_numacheck']:
                log.warning("Not enough numa nodes to process data!")
                self.__numa_node_pool = numa.getInfo().keys()
            else:
                raise FailReply("Not enough numa nodes to process data!")

        self._subprocessMonitor = SubprocessMonitor()

        if len(self._config['input_data_streams']) != 2:
            raise FailReply("Require 2 polarization input, got {}".format(
                len(self._config['input_data_streams'])))

        log.debug("Merging ip ranges")
        self.stream_description = copy.deepcopy(
            self._config['input_data_streams'].items()[0][1])
        self.stream_description["ip"] += ",{}".format(
            self._config['input_data_streams'].items()[1][1]["ip"])

        log.debug("Merged ip ranges: {}".format(self.stream_description["ip"]))

        self.input_heapSize = self.stream_description[
            "samples_per_heap"] * self.stream_description['bit_depth'] // 8

        nHeaps = self._config["samples_per_block"] // self.stream_description[
            "samples_per_heap"]
        input_bufferSize = nHeaps * (self.input_heapSize + 64 // 8)
        log.info('Input dada parameters created from configuration:\n\
                heap size:        {} byte\n\
                heaps per block:  {}\n\
                buffer size:      {} byte'.format(self.input_heapSize, nHeaps,
                                                  input_bufferSize))

        # calculate output buffer parameters
        nSlices = max(
            self._config["samples_per_block"] // 2 //
            self._config['fft_length'] // self._config['naccumulate'], 1)
        nChannels = self._config['fft_length'] // 2 + 1
        # on / off spectrum + one side channel item per spectrum
        output_bufferSize = nSlices * (8 * (nChannels * 32 // 8 + 2 * 8))
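        # presumably 8 spectra per slice (4 Stokes parameters x on/off gate),
        # each holding nChannels 32-bit values plus two 8-byte side-channel items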

        output_heapSize = nChannels * 32 // 8
        integrationTime = self._config['fft_length'] * self._config[
            'naccumulate'] / (float(self.stream_description["sample_rate"]))
        self._integration_time_status.set_value(integrationTime)
        rate = output_heapSize / integrationTime  # the spead documentation uses BYTES per second, not bits!
        rate *= self._config[
            "output_rate_factor"]  # set rate to (100+X)% of expected rate
        self._output_rate_status.set_value(rate / 1E9)

        log.info('Output parameters calculated from configuration:\n\
                spectra per block:  {} \n\
                nChannels:          {} \n\
                buffer size:        {} byte \n\
                integrationTime :   {} s \n\
                heap size:          {} byte\n\
                rate ({:.0f}%):        {} Gbyte/s'.format(
            nSlices, nChannels, output_bufferSize, integrationTime,
            output_heapSize, self._config["output_rate_factor"] * 100,
            rate / 1E9))

        numa_node = self.__numa_node_pool[0]
        log.debug("Associating with numa node {}".format(numa_node))

        # configure dada buffer
        yield self._create_ring_buffer(input_bufferSize, 64, self.__dada_key,
                                       numa_node)

        ofname = self.__dada_key[::-1]
        # we write nSlice blocks on each go
        yield self._create_ring_buffer(output_bufferSize, 8 * nSlices, ofname,
                                       numa_node)

        ## specify all subprocesses
        self.__coreManager = CoreManager(numa_node)
        self.__coreManager.add_task("gated_spectrometer", 1)

        N_inputips = 0
        for p in self.stream_description["ip"].split(','):
            N_inputips += len(ipstring_to_list(p))
        log.debug("Found {} input ips".format(N_inputips))

        if not self._config["dummy_input"]:
            self.__coreManager.add_task("mkrecv",
                                        N_inputips + 1,
                                        prefere_isolated=True)
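            # one capture core per input ip plus one core for the main thread
            # (cf. nthreads = cores - 1 when mkrecv is launched in capture_start)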

        if self._config["output_type"] == "network":
            self.__coreManager.add_task("mksend", 2)

        # Configure + launch
        cmd = "taskset -c {physcpu} gated_spectrometer --nsidechannelitems=1 --input_key={dada_key} --speadheap_size={heapSize} --selected_sidechannel=0 --nbits={bit_depth} --fft_length={fft_length} --naccumulate={naccumulate} -o {ofname} --log_level={log_level} --output_format=Stokes  --input_polarizations=Dual --output_type=dada".format(
            dada_key=self.__dada_key,
            ofname=ofname,
            heapSize=self.input_heapSize,
            bit_depth=self.stream_description['bit_depth'],
            physcpu=self.__coreManager.get_coresstr('gated_spectrometer'),
            **self._config)
        log.debug("Command to run: {}".format(cmd))

        cudaDevice = numa.getInfo()[numa_node]['gpus'][0]
        gated_cli = ManagedProcess(cmd,
                                   env={"CUDA_VISIBLE_DEVICES": cudaDevice})
        log.debug("Visble Cuda Device: {}".format(cudaDevice))
        self._subprocessMonitor.add(gated_cli, self._subprocess_error)
        self._subprocesses.append(gated_cli)

        cfg = self._config.copy()
        cfg.update(self.stream_description)
        cfg["dada_key"] = self.__dada_key

        ip_range = []
        port = set()
        for key in self._config["output_data_streams"]:
            ip_range.append(self._config["output_data_streams"][key]['ip'])
            port.add(self._config["output_data_streams"][key]['port'])
        if len(port) != 1:
            raise FailReply("Output data has to be on the same port! ")

        if self._config["output_type"] == 'network':
            mksend_header_file = tempfile.NamedTemporaryFile(delete=False)
            mksend_header_file.write(_mksend_header)
            mksend_header_file.close()

            nhops = len(ip_range)

            timestep = cfg["fft_length"] * cfg["naccumulate"]
            # select network interface
            fastest_nic, nic_params = numa.getFastestNic(numa_node)
            heap_id_start = 0  # 2 * i: two output spectra per pol

            log.info("Sending data on NIC {} [ {} ] @ {} Mbit/s".format(
                fastest_nic, nic_params['ip'], nic_params['speed']))
            cmd = "taskset -c {physcpu} mksend --header {mksend_header} --heap-id-start {heap_id_start} --dada-key {ofname} --ibv-if {ibv_if} --port {port_tx} --sync-epoch {sync_time} --sample-clock {sample_rate} --item1-step {timestep} --item4-list {fft_length} --item6-list {sync_time} --item7-list {sample_rate} --item8-list {naccumulate} --rate {rate} --heap-size {heap_size} --nhops {nhops} {mcast_dest}".format(
                mksend_header=mksend_header_file.name,
                heap_id_start=heap_id_start,
                timestep=timestep,
                ofname=ofname,
                physcpu=self.__coreManager.get_coresstr('mksend'),
                rate=rate,
                nhops=nhops,
                heap_size=output_heapSize,
                ibv_if=nic_params['ip'],
                mcast_dest=" ".join(ip_range),
                port_tx=port.pop(),
                **cfg)
            log.debug("Command to run: {}".format(cmd))

        elif self._config["output_type"] == 'disk':
            ofpath = os.path.join(cfg["output_directory"], ofname)
            log.debug("Writing output to {}".format(ofpath))
            if not os.path.isdir(ofpath):
                os.makedirs(ofpath)
            cmd = "dada_dbdisk -k {ofname} -D {ofpath} -W".format(
                ofname=ofname, ofpath=ofpath, **cfg)
        else:
            log.warning("Selected null output. Not sending data!")
            cmd = "dada_dbnull -z -k {}".format(ofname)

        mks = ManagedProcess(cmd, env={"CUDA_VISIBLE_DEVICES": cudaDevice})
        self._subprocessMonitor.add(mks, self._subprocess_error)
        self._subprocesses.append(mks)

        self._subprocessMonitor.start()

    @state_change(target="streaming",
                  allowed=["configured"],
                  intermediate="capture_starting")
    @coroutine
    def capture_start(self, config_json=""):
        """
        Start streaming of the spectrometer output.
        """
        log.info("Starting EDD backend")
        try:
            mkrecvheader_file = tempfile.NamedTemporaryFile(delete=False)
            log.debug("Creating mkrec header file: {}".format(
                mkrecvheader_file.name))
            mkrecvheader_file.write(_mkrecv_header)
            # DADA may need this
            # ToDo: Check for input stream definitions
            mkrecvheader_file.write("NBIT {}\n".format(
                self.stream_description["bit_depth"]))
            mkrecvheader_file.write("HEAP_SIZE {}\n".format(
                self.input_heapSize))

            mkrecvheader_file.write("\n#OTHER PARAMETERS\n")
            mkrecvheader_file.write("samples_per_block {}\n".format(
                self._config["samples_per_block"]))

            mkrecvheader_file.write(
                "\n#PARAMETERS ADDED AUTOMATICALLY BY MKRECV\n")
            mkrecvheader_file.close()

            cfg = copy.deepcopy(self._config)
            cfg.update(self.stream_description)
            cfg["dada_key"] = self.__dada_key
            if not self._config['dummy_input']:
                numa_node = self.__numa_node_pool[0]
                fastest_nic, nic_params = numa.getFastestNic(numa_node)
                log.info("Receiving data on NIC {} [ {} ] @ {} Mbit/s".format(
                    fastest_nic, nic_params['ip'], nic_params['speed']))

                if self._config[
                        'idx1_modulo'] == 'auto':  # Align along output ranges
                    idx1modulo = self._config['fft_length'] * self._config[
                        'naccumulate'] // self.stream_description[
                            'samples_per_heap']
                else:
                    idx1modulo = self._config['idx1_modulo']
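                # e.g. fft_length=1024, naccumulate=32, samples_per_heap=4096
                # gives idx1modulo = 1024 * 32 // 4096 = 8 heaps per spectrum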

                cmd = "taskset -c {physcpu} mkrecv_v4 --quiet --lst --header {mkrecv_header} --idx1-step {samples_per_heap} --heap-size {input_heap_size} --idx1-modulo {idx1modulo}  --nthreads {nthreads}\
                --dada-key {dada_key} --sync-epoch {sync_time} --sample-clock {sample_rate} \
                --ibv-if {ibv_if} --port {port} {ip}".format(
                    mkrecv_header=mkrecvheader_file.name,
                    physcpu=self.__coreManager.get_coresstr('mkrecv'),
                    ibv_if=nic_params['ip'],
                    input_heap_size=self.input_heapSize,
                    idx1modulo=idx1modulo,
                    nthreads=len(self.__coreManager.get_cores('mkrecv')) - 1,
                    **cfg)
                mk = ManagedProcess(
                    cmd, stdout_handler=self._mkrecv_sensors.stdout_handler)
            else:
                log.warning(
                    "Creating Dummy input instead of listening to network!")
                cmd = "dada_junkdb -c 1 -R 1000 -t 3600 -k {dada_key} {mkrecv_header}".format(
                    mkrecv_header=mkrecvheader_file.name, **cfg)

                mk = ManagedProcess(cmd)

            self.mkrec_cmd.append(mk)
            self._subprocessMonitor.add(mk, self._subprocess_error)
            self._subprocesses.append(mk)

        except Exception as E:
            log.error("Error starting pipeline: {}".format(E))
            raise
        else:
            self.__watchdogs = []
            wd = SensorWatchdog(self._input_buffer_total_write,
                                10 * self._integration_time_status.value(),
                                self.watchdog_error)
            wd.start()
            self.__watchdogs.append(wd)
            # Wait one integration period before returning to ensure
            # streaming has started before we report OK
            yield sleep(self._integration_time_status.value())

    @state_change(target="idle",
                  allowed=["streaming", "deconfiguring"],
                  intermediate="capture_stopping")
    @coroutine
    def capture_stop(self):
        """
        @brief Stop streaming of data
        """
        log.info("Stoping EDD backend")
        for wd in self.__watchdogs:
            wd.stop_event.set()
            yield
        if self._subprocessMonitor is not None:
            self._subprocessMonitor.stop()
            yield

        # stop mkrecv processes
        log.debug("Stopping mkrecv processes ...")
        for proc in self.mkrec_cmd:
            proc.terminate()
            yield
        # This also terminates the gated spectrometer automatically

        yield self.deconfigure()

    @state_change(target="idle", intermediate="deconfiguring", error='panic')
    @coroutine
    def deconfigure(self):
        """
        @brief deconfigure the gated spectrometer pipeline.
        """
        log.info("Deconfiguring EDD backend")
        if self.previous_state == 'streaming':
            yield self.capture_stop()

        if self._subprocessMonitor is not None:
            yield self._subprocessMonitor.stop()
        for proc in self._subprocesses:
            yield proc.terminate()

        self.mkrec_cmd = []

        log.debug("Destroying dada buffers")
        for k in self._dada_buffers:
            k['monitor'].stop()
            cmd = "dada_db -d -k {0}".format(k['key'])
            log.debug("Running command: {0}".format(cmd))
            yield command_watcher(cmd, allow_fail=True)

        self._dada_buffers = []