Example #1
    def deconfigure(self, description):
        """Deconfigure the current subarray and release its hosts for future
           use with other subarrays.
        """
        # Fetch hosts allocated to this subarray.
        # Note: description is equivalent to product_id here.
        array_key = 'coordinator:allocated_hosts:{}'.format(description)
        allocated_hosts = self.red.lrange(array_key, 0, -1)
        # Build list of Hashpipe-Redis Gateway channels to publish to:
        chan_list = self.host_list(HPGDOMAIN, allocated_hosts)
        # Send a deconfigure message to these specific hosts:
        for chan in chan_list:
            self.pub_gateway_msg(self.red, chan, 'DESTIP', '0.0.0.0',
                                 log, False)
        log.info('Subarray {} deconfigured'.format(description))
        # Release hosts.
        # NOTE: in future, replace write_list_redis with Redis append/pop
        # commands; this will simplify this step.
        # Get the list of currently available hosts:
        free_hosts = self.red.lrange('coordinator:free_hosts', 0, -1)
        # Append the released hosts and write back:
        free_hosts = free_hosts + allocated_hosts
        redis_tools.write_list_redis(self.red, 'coordinator:free_hosts',
                                     free_hosts)
        # Remove resources from the current subarray:
        self.red.delete(array_key)
        log.info("Released {} hosts; {} hosts available".format(
            len(allocated_hosts), len(free_hosts)))
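As the NOTE above suggests, the release step could use Redis list commands directly instead of rewriting the whole list with write_list_redis. A minimal sketch, assuming a redis-py client; the helper name release_hosts is hypothetical, and the key names mirror those used above:

def release_hosts(red, product_id, allocated_hosts):
    # Append the released hosts to the free list in a single RPUSH,
    # then drop the per-subarray allocation key.
    if allocated_hosts:
        red.rpush('coordinator:free_hosts', *allocated_hosts)
    red.delete('coordinator:allocated_hosts:{}'.format(product_id))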
Example #2
def main():
    log = set_logger()
    log.info("Starting Katportal Client")

    client = BLKATPortalClient()
    signal.signal(signal.SIGINT, lambda sig, frame: on_shutdown())
    client.start()
Example #3
    def ip_offset(self, product_id):
        """Get the IP offset (for ingesting fractions of the band).
        """
        try:
            offset = int(self.red.get('{}:ip_offset'.format(product_id)))
            if offset > 0:
                log.info('Stream IP offset applied: {}'.format(offset))
        except (TypeError, ValueError):
            # Key missing (get returned None) or value not an integer.
            log.info("No stream IP offset; defaulting to 0")
            offset = 0
        return offset
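For context, the offset lives in a plain Redis string key; an illustrative setup and call (the values and the coordinator instance are hypothetical, the key name is taken from the code above):

red = redis.StrictRedis()
red.set('array_1:ip_offset', 2)            # ingest a shifted fraction of the band
offset = coordinator.ip_offset('array_1')  # returns 2 and logs the offset
offset = coordinator.ip_offset('array_2')  # missing key: returns 0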
Example #4
    def parse_redis_msg(self, message):
        """Split the message at most twice - the format is as follows:
           message_type:description:value
           OR message_type:description (if there is no associated value)

           If the message does not appear to fit this format, it is logged
           and empty strings are returned.
           If the message does fit the format, its parts are returned.
        """
        msg_type = ''
        description = ''
        value = ''
        msg_parts = message['data'].split(':', 2)
        if len(msg_parts) < 2:
            log.info("Not processing this message: {}".format(message))
        else:
            msg_type = msg_parts[0]
            description = msg_parts[1]
            if len(msg_parts) > 2:
                value = msg_parts[2]
        return msg_type, description, value
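An illustrative round trip against the documented format (the dicts mimic what redis-py pub/sub delivers; coordinator stands in for a Coordinator instance):

msg = {'data': 'tracking:array_1'}
coordinator.parse_redis_msg(msg)
# -> ('tracking', 'array_1', '')
msg = {'data': 'coordinator:trigger_mode:armed'}
coordinator.parse_redis_msg(msg)
# -> ('coordinator', 'trigger_mode', 'armed')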
Example #5
def main(port):
    FORMAT = "[ %(levelname)s - %(asctime)s - %(filename)s:%(lineno)s] %(message)s"
    logging.basicConfig(format=FORMAT)
    log.setLevel(logging.DEBUG)
    log.info("Starting distributor")
    # decode_responses=True so messages arrive as str (needed on Python 3):
    red = redis.StrictRedis(port=port, decode_responses=True)
    ps = red.pubsub(ignore_subscribe_messages=True)
    ps.subscribe(CHANNEL)
    try:
        for message in ps.listen():
            msg_parts = message['data'].split(':')
            if len(msg_parts) != 2:
                log.info("Not processing this message --> {}".format(message))
                continue
            msg_type = msg_parts[0]
            product_id = msg_parts[1]
            if msg_type == 'configure':
                all_streams = json.loads(
                    json_str_formatter(red.get("{}:streams".format(product_id))))
                streams = all_streams[STREAM_TYPE]
                # list(...) so this also works on Python 3, where .values()
                # returns a view rather than a list:
                addr_list, port = parse_spead_addresses(list(streams.values())[0])
                nstreams = len(addr_list)
                if nstreams > NCHANNELS:
                    log.warning("More than {} ({}) stream addresses found".format(
                        NCHANNELS, nstreams))
                for i in range(min(nstreams, NCHANNELS)):
                    msg = "{}:configure:stream:{}".format(product_id, addr_list[i])
                    red.publish(CHANNELS[i], msg)
    except KeyboardInterrupt:
        log.info("Stopping distributor")
        sys.exit(0)
    except Exception as e:
        log.error(e)
        sys.exit(1)
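On the receiving side, each per-instance channel gets messages of the form '<product_id>:configure:stream:<address>'. A minimal consumer sketch; the channel name 'bluse_chan_0' is hypothetical, standing in for an entry of CHANNELS above:

import redis

red = redis.StrictRedis(decode_responses=True)
ps = red.pubsub(ignore_subscribe_messages=True)
ps.subscribe('bluse_chan_0')  # hypothetical per-instance channel name
for message in ps.listen():
    # Split off the address, which may itself contain ':' characters:
    product_id, _, _, addr = message['data'].split(':', 3)
    print('{} should subscribe to stream {}'.format(product_id, addr))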
Example #6
def main(port, cfg_file, triggermode):
    log = set_logger(log_level=logging.DEBUG)
    log.info("Starting Coordinator")
    coord = Coordinator(port, cfg_file, triggermode)
    signal.signal(signal.SIGINT, lambda sig, frame: on_shutdown())
    coord.start()
Example #7
def on_shutdown():
    log.info("Coordinator shutting down.")
    sys.exit()
Example #8
    def start(self):
        """Start the coordinator as follows:

           - The list of available Hashpipe instances and the number of
             streams per instance are retrieved from the main configuration
             .yml file.

           - The number of available instances/hosts is read from the
             appropriate Redis key.

           - Subscriptions are made to the three Redis channels.

           - Incoming messages trigger the appropriate function for the stage
             of the observation. The contents of the messages (if any) are
             sent to the appropriate function.
        """
        # Configure coordinator
        try:
            self.hashpipe_instances, self.streams_per_instance = self.config(
                self.cfg_file)
            log.info('Configured from {}'.format(self.cfg_file))
        except Exception:
            log.warning(
                'Configuration not updated; old configuration might be present.'
            )
        # Attempt to read the list of available hosts. If the key does not
        # exist, recreate it from the config file.
        free_hosts = self.red.lrange('coordinator:free_hosts', 0, -1)
        if len(free_hosts) == 0:
            redis_tools.write_list_redis(self.red, 'coordinator:free_hosts',
                                         self.hashpipe_instances)
            log.info(
                'First configuration - no list of available hosts. Retrieving from config file.'
            )
        # Subscribe to the required Redis channels.
        ps = self.red.pubsub(ignore_subscribe_messages=True)
        ps.subscribe(ALERTS_CHANNEL)
        ps.subscribe(SENSOR_CHANNEL)
        ps.subscribe(TRIGGER_CHANNEL)
        # Process incoming Redis messages:
        try:
            for msg in ps.listen():
                msg_type, description, value = self.parse_redis_msg(msg)
                # If trigger mode is changed on the fly:
                if msg_type == 'coordinator' and description == 'trigger_mode':
                    self.triggermode = value
                    self.red.set('coordinator:trigger_mode', value)
                    log.info('Trigger mode set to \'{}\''.format(value))
                # If all the sensor values required on configure have been
                # successfully fetched by the katportalserver:
                elif msg_type == 'conf_complete':
                    self.conf_complete(description)
                # If the current subarray is deconfigured, instruct its
                # processing nodes (and only its processing nodes) to
                # unsubscribe from their respective streams. Likewise, release
                # hosts only for the current subarray.
                elif msg_type == 'deconfigure':
                    self.deconfigure(description)
                # Handle the full data-suspect bitmask, one bit per
                # polarisation per F-engine.
                elif msg_type == 'data-suspect':
                    self.data_suspect(description, value)
                # If the current subarray has transitioned to 'track' - that
                # is, the antennas are on source and tracking successfully.
                elif msg_type == 'tracking':
                    # Note that the description field is equivalent to
                    # product_id here:
                    self.tracking_start(description)
                # If the current subarray transitions out of the tracking
                # state:
                elif msg_type == 'not-tracking':
                    self.tracking_stop(description)
                # If pointing updates are received during tracking:
                elif 'pos_request_base' in description:
                    self.pointing_update(msg_type, description, value)
        except KeyboardInterrupt:
            log.info("Stopping coordinator")
            sys.exit(0)
        except Exception as e:
            log.error(e)
            sys.exit(1)
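Since the dispatch above is driven entirely by published Redis messages, the coordinator can be exercised by hand. A sketch, assuming the trigger and alerts channels are named 'coordinator_triggers' and 'alerts' (the actual values of TRIGGER_CHANNEL and ALERTS_CHANNEL are not shown in these examples):

import redis

red = redis.StrictRedis()
# Switch the trigger mode on the fly (handled by the first branch above):
red.publish('coordinator_triggers', 'coordinator:trigger_mode:armed')
# Simulate a subarray deconfigure (handled by the 'deconfigure' branch):
red.publish('alerts', 'deconfigure:array_1')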
Example #9
    def tracking_start(self, product_id):
        """When a subarray is on source and begins tracking, and the F-engine
           data is trustworthy, this function instructs the processing nodes
           to begin recording data.

           Data recording is initiated by issuing a PKTSTART value to the
           processing nodes in question via the Hashpipe-Redis gateway [1].

           In addition, other appropriate metadata is published to the
           processing nodes via the Hashpipe-Redis gateway.

           Args:

               product_id (str): name of the current subarray.

           [1] https://arxiv.org/pdf/1906.07391.pdf
        """
        # Get the list of allocated hosts for this subarray:
        array_key = 'coordinator:allocated_hosts:{}'.format(product_id)
        allocated_hosts = self.red.lrange(array_key, 0, -1)
        # Build the list of Hashpipe-Redis Gateway channels to publish to:
        chan_list = self.host_list(HPGDOMAIN, allocated_hosts)
        # Send messages to these specific hosts:
        datadir = self.datadir(product_id)
        # Target information (fetched once for all hosts):
        target_str, ra_str, dec_str = self.target(product_id)
        for chan in chan_list:
            # Publish DATADIR to the gateway:
            self.pub_gateway_msg(self.red, chan, 'DATADIR', datadir,
                                 log, False)
            # SRC_NAME:
            self.pub_gateway_msg(self.red, chan, 'SRC_NAME',
                                 target_str, log, False)
            # RA_STR and DEC_STR:
            self.pub_gateway_msg(self.red, chan, 'RA_STR', ra_str, log,
                                 False)
            self.pub_gateway_msg(self.red, chan, 'DEC_STR', dec_str,
                                 log, False)
        # Set PKTSTART separately after all of the above messages have been
        # delivered:
        pkt_idx_start = self.get_start_idx(allocated_hosts, PKTIDX_MARGIN, log)
        for chan in chan_list:
            self.pub_gateway_msg(self.red, chan, 'PKTSTART',
                                 pkt_idx_start, log, False)
        # Alert via Slack:
        slack_message = "{}::meerkat:: New recording started for {}!".format(
            SLACK_CHANNEL, product_id)
        self.red.publish(PROXY_CHANNEL, slack_message)
        # If armed, reset the trigger mode to idle after triggering once.
        # NOTE: triggermode retrieval may still need hardening.
        triggermode = self.red.get(
            'coordinator:trigger_mode:{}'.format(product_id))
        if triggermode == 'armed':
            self.red.set('coordinator:trigger_mode:{}'.format(product_id),
                         'idle')
            log.info(
                'Triggermode set to \'idle\' from \'armed\' for {}'.format(
                    product_id))
        elif triggermode and 'nshot' in triggermode:
            # In nshot mode, decrement nshot by one and write it to Redis.
            nshot = triggermode.split(':')
            n = int(nshot[1]) - 1
            triggermode = '{}:{}'.format(nshot[0], n)
            self.red.set('coordinator:trigger_mode:{}'.format(product_id),
                         triggermode)
            log.info('Triggermode: n shots remaining: {}'.format(n))
            if n <= 0:
                # Set the trigger mode to idle.
                triggermode = 'idle'
                self.red.set('coordinator:trigger_mode:{}'.format(product_id),
                             'idle')
                log.info('Triggermode set to \'idle\' from \'nshot\'')
        # Set subarray state to 'tracking':
        self.red.set('coordinator:tracking:{}'.format(product_id), '1')
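The trigger-mode handling above amounts to a small countdown state machine: 'armed' becomes 'idle' after one recording, and 'nshot:3' counts down 'nshot:2', 'nshot:1', then 'idle'. Isolated as a pure helper it might look like this (a sketch; the function name is hypothetical):

def next_trigger_mode(triggermode):
    # Return the trigger mode to store once a recording has started.
    if triggermode == 'armed':
        return 'idle'
    if triggermode and triggermode.startswith('nshot'):
        label, count = triggermode.split(':')
        n = int(count) - 1
        return '{}:{}'.format(label, n) if n > 0 else 'idle'
    return triggermode  # 'auto' and 'idle' are left unchanged

next_trigger_mode('nshot:1')  # -> 'idle'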
Example #10
    def conf_complete(self, description):
        """This function is run when a new subarray is configured and the
           katportal_server has retrieved all the associated metadata required
           for the processing nodes to ingest and record data from the
           F-engines.

           The required metadata is published to the Hashpipe-Redis gateway in
           the key-value pair format described in Appendix B of:
           https://arxiv.org/pdf/1906.07391.pdf

           Notably, the DESTIP value is set for each processing node - the IP
           address of the multicast group it is to join.

           Args:

               description (str): the second field of the Redis message, which
               in this case is the name of the current subarray.
        """
        # This is the identifier for the subarray that has completed
        # configuration:
        product_id = description
        tracking = 0  # Initialise tracking state to 0
        log.info('New subarray built: {}'.format(product_id))
        # Get the IP address offset (if there is one) for ingesting only a
        # specific portion of the full band:
        offset = self.ip_offset(product_id)
        # Initialise the trigger mode (idle, armed or auto):
        self.red.set('coordinator:trigger_mode:{}'.format(description),
                     self.triggermode)
        log.info('Trigger mode for {} on startup: {}'.format(
            description, self.triggermode))
        # Generate the list of stream IP addresses and publish the appropriate
        # messages to the processing nodes:
        addr_list, port, n_addrs, n_red_chans = self.ip_addresses(
            product_id, offset)
        # Get the list of currently available hosts:
        free_hosts = self.red.lrange('coordinator:free_hosts', 0, -1)
        # Allocate hosts for the current subarray:
        if len(free_hosts) == 0:
            log.warning(
                "No free resources, cannot process data from {}".format(
                    product_id))
            return
        allocated_hosts = free_hosts[0:n_red_chans]
        redis_tools.write_list_redis(
            self.red, 'coordinator:allocated_hosts:{}'.format(product_id),
            allocated_hosts)
        # Remove the allocated hosts from the list of available hosts.
        # NOTE: in future, append/pop with Redis commands instead of
        # write_list_redis.
        if len(free_hosts) < n_red_chans:
            log.warning(
                "Insufficient resources to process full band for {}".format(
                    product_id))
            free_hosts = []  # Empty
        else:
            free_hosts = free_hosts[n_red_chans:]
        redis_tools.write_list_redis(self.red, 'coordinator:free_hosts',
                                     free_hosts)
        log.info('Allocated {} hosts to {}'.format(n_red_chans, product_id))
        # Build the list of Hashpipe-Redis Gateway channels to publish to:
        chan_list = self.host_list(HPGDOMAIN, allocated_hosts)
        # Values that are the same for every instance (fetched once):
        t_sync = self.sync_time(product_id)       # Sync time (UNIX, seconds)
        fecenter = self.centre_freq(product_id)   # Centre frequency
        # Total number of frequency channels:
        n_freq_chans = self.red.get('{}:n_channels'.format(product_id))
        # Coarse channel bandwidth from the F-engines.
        # Note: no sign information!
        chan_bw = self.coarse_chan_bw(product_id, n_freq_chans)
        hnchan = self.chan_per_substream(product_id)  # Channels per substream
        hntime = self.spectra_per_heap(product_id)    # Spectra per heap
        # Number of ADC samples per heap:
        adc_per_heap = self.samples_per_heap(product_id, hntime)
        n_ants = self.antennas(product_id)            # Number of antennas
        # Apply to processing nodes.
        # NOTE: can we address multiple processing nodes more easily?
        for i, chan in enumerate(chan_list):
            # Port (BINDPORT):
            self.pub_gateway_msg(self.red, chan, 'BINDPORT', port, log, True)
            # Total number of streams (FENSTRM):
            self.pub_gateway_msg(self.red, chan, 'FENSTRM', n_addrs, log,
                                 True)
            # Sync time (SYNCTIME):
            self.pub_gateway_msg(self.red, chan, 'SYNCTIME', t_sync, log,
                                 True)
            # Centre frequency (FECENTER):
            self.pub_gateway_msg(self.red, chan, 'FECENTER', fecenter, log,
                                 True)
            # Total number of frequency channels (FENCHAN):
            self.pub_gateway_msg(self.red, chan, 'FENCHAN', n_freq_chans, log,
                                 True)
            # Coarse channel bandwidth (CHAN_BW):
            self.pub_gateway_msg(self.red, chan, 'CHAN_BW', chan_bw, log,
                                 True)
            # Number of channels per substream (HNCHAN):
            self.pub_gateway_msg(self.red, chan, 'HNCHAN', hnchan, log, True)
            # Number of spectra per heap (HNTIME):
            self.pub_gateway_msg(self.red, chan, 'HNTIME', hntime, log, True)
            # Number of ADC samples per heap (HCLOCKS):
            self.pub_gateway_msg(self.red, chan, 'HCLOCKS', adc_per_heap, log,
                                 True)
            # Number of antennas (NANTS):
            self.pub_gateway_msg(self.red, chan, 'NANTS', n_ants, log, True)
            # Set PKTSTART to 0 on configure:
            self.pub_gateway_msg(self.red, chan, 'PKTSTART', 0, log, True)
            # Number of streams for instance i (NSTRM):
            n_streams_per_instance = int(addr_list[i][-1]) + 1
            self.pub_gateway_msg(self.red, chan, 'NSTRM',
                                 n_streams_per_instance, log, True)
            # Absolute starting channel for instance i (SCHAN):
            s_chan = offset * int(hnchan) + i * n_streams_per_instance * int(
                hnchan)
            self.pub_gateway_msg(self.red, chan, 'SCHAN', s_chan, log, True)
            # Destination IP addresses for instance i (DESTIP):
            self.pub_gateway_msg(self.red, chan, 'DESTIP', addr_list[i], log,
                                 True)
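The SCHAN arithmetic above places each instance at its absolute starting coarse channel: offset * HNCHAN skips the portion of the band below the IP offset, and i * NSTRM * HNCHAN steps over the channels handled by earlier instances. A worked example with illustrative values (offset = 0, HNCHAN = 256, NSTRM = 4):

offset = 0
hnchan = 256                # channels per substream (HNCHAN)
n_streams_per_instance = 4  # streams per instance (NSTRM)
for i in range(3):
    s_chan = offset * hnchan + i * n_streams_per_instance * hnchan
    print('instance {}: SCHAN = {}'.format(i, s_chan))
# instance 0: SCHAN = 0
# instance 1: SCHAN = 1024
# instance 2: SCHAN = 2048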
Example #11
def main(config):
    log = set_logger(log_level=logging.DEBUG)
    log.info("Starting Katportal Client")
    client = BLKATPortalClient(config)
    signal.signal(signal.SIGINT, lambda sig, frame: on_shutdown())
    client.start()
Example #12
def on_shutdown():
    # TODO: uncomment when you deploy
    # notify_slack("KATPortal module at MeerKAT has halted. Might want to check that!")
    log.info("Shutting Down Katportal Clients")
    sys.exit()