def waitForSMRB(self):
    """Block until the receiving-data-block SMRB responds, or give up.

    Derives and stores the ring buffer key in self.db_key, then polls
    the SMRB daemon's monitoring port once per second for up to 60
    attempts. Returns True as soon as a status query succeeds, False
    if attempts run out or quit is requested.
    """
    db_id = self.cfg["RECEIVING_DATA_BLOCK"]
    db_prefix = self.cfg["DATA_BLOCK_PREFIX"]
    num_stream = self.cfg["NUM_STREAM"]
    self.db_key = SMRBDaemon.getDBKey(db_prefix, self.id, num_stream, db_id)

    # monitoring port of the SMRB daemon for this stream
    mon_port = SMRBDaemon.getDBMonPort(self.id)

    attempts_left = 60
    found = False
    while not found and attempts_left > 0 and not self.quit_event.isSet():
        self.log(2, "trying to open connection to SMRB")
        conn = sockets.openSocket(DL, "localhost", mon_port, 1)
        if not conn:
            # not up yet; wait a second and retry
            sleep(1)
            attempts_left -= 1
        else:
            conn.send("smrb_status\r\n")
            junk = conn.recv(65536)
            conn.close()
            found = True
    return found
def waitForSMRB(self):
    """Block until the processing-data-block SMRB responds, or give up.

    Derives and stores the ring buffer key in self.db_key, then polls
    the SMRB daemon's monitoring port once per second for up to 60
    attempts. Returns True once a status query succeeds, False if the
    attempts run out or quit is requested.
    """
    db_id = self.cfg["PROCESSING_DATA_BLOCK"]
    db_prefix = self.cfg["DATA_BLOCK_PREFIX"]
    num_stream = self.cfg["NUM_STREAM"]
    self.db_key = SMRBDaemon.getDBKey(db_prefix, self.id, num_stream, db_id)

    # monitoring port of the SMRB daemon for this stream
    mon_port = SMRBDaemon.getDBMonPort(self.id)

    retries = 60
    alive = False
    while not alive and retries > 0 and not self.quit_event.isSet():
        self.log(2, "trying to open connection to SMRB")
        conn = sockets.openSocket(DL, "localhost", mon_port, 1)
        if conn:
            conn.send("smrb_status\r\n")
            junk = conn.recv(65536)
            conn.close()
            alive = True
        else:
            # SMRB not created yet; back off one second
            sleep(1)
            retries -= 1
    return alive
def request_deconfigure(self, req, msg):
    """Deconfigure for the data_product.

    Stops any in-progress recording, completes capture if ready,
    transitions the state machine, instructs every RECV stream serving
    this beam to deconfigure, and clears the stored data product id.

    Returns a (status, message) tuple.

    FIX: the original read data_product_id from self._data_product["id"]
    and immediately compared it against the same value, so the
    "did not match" failure branch could never execute — removed.
    Log prefixes corrected from "configure:" to "deconfigure:".
    """
    self.script.log(1, "request_deconfigure()")

    # in case the observing was terminated early
    if self._data_product["state"] == "recording":
        (result, message) = self.target_stop()
    if self._data_product["state"] == "ready":
        (result, message) = self.capture_done()

    data_product_id = self._data_product["id"]

    # change the state
    (result, message) = self.change_state("deconfigure")
    if result != "ok":
        self.script.log(-1, "deconfigure: change_state failed: " + message)
        return (result, message)

    for istream in range(int(self.script.cfg["NUM_STREAM"])):
        (host, beam_idx, subband) = \
            self.script.cfg["STREAM_" + str(istream)].split(":")
        if self.script.beam_name == self.script.cfg["BEAM_" + beam_idx]:
            # reset ADC_SYNC_TIME on the beam
            self.script.beam_config["lock"].acquire()
            self.script.beam_config["ADC_SYNC_TIME"] = "0"
            self.script.beam_config["lock"].release()

            port = int(self.script.cfg["STREAM_RECV_PORT"]) + istream
            self.script.log(
                3, "deconfigure: connecting to " + host + ":" + str(port))
            sock = sockets.openSocket(DL, host, port, 1)
            if sock:
                req = "<?req version='1.0' encoding='ISO-8859-1'?>"
                req += "<recv_cmd>"
                req += "<command>deconfigure</command>"
                req += "</recv_cmd>"
                sock.send(req)
                recv_reply = sock.recv(65536)
                sock.close()

    # remove the data product
    self._data_product["id"] = "None"

    response = "data product " + str(data_product_id) + " deconfigured"
    self.script.log(1, "deconfigure: " + response)
    return ("ok", response)
def request_deconfigure(self, req, msg):
    """Deconfigure for the data_product.

    Stops any in-progress recording, completes capture if ready,
    transitions the state machine, instructs every RECV stream serving
    this beam to deconfigure, and clears the stored data product id.

    Returns a (status, message) tuple.

    FIX: removed the unreachable mismatch check — data_product_id was
    read from self._data_product["id"] and then compared to itself, so
    the failure branch could never fire. Log prefixes corrected from
    "configure:" to "deconfigure:"; stray semicolon dropped.
    """
    self.script.log(1, "request_deconfigure()")

    # in case the observing was terminated early
    if self._data_product["state"] == "recording":
        (result, message) = self.target_stop()
    if self._data_product["state"] == "ready":
        (result, message) = self.capture_done()

    data_product_id = self._data_product["id"]

    # change the state
    (result, message) = self.change_state("deconfigure")
    if result != "ok":
        self.script.log(-1, "deconfigure: change_state failed: " + message)
        return (result, message)

    for istream in range(int(self.script.cfg["NUM_STREAM"])):
        (host, beam_idx, subband) = \
            self.script.cfg["STREAM_" + str(istream)].split(":")
        if self.script.beam_name == self.script.cfg["BEAM_" + beam_idx]:
            # reset ADC_SYNC_TIME on the beam
            self.script.beam_config["lock"].acquire()
            self.script.beam_config["ADC_SYNC_TIME"] = "0"
            self.script.beam_config["lock"].release()

            port = int(self.script.cfg["STREAM_RECV_PORT"]) + istream
            self.script.log(
                3, "deconfigure: connecting to " + host + ":" + str(port))
            sock = sockets.openSocket(DL, host, port, 1)
            if sock:
                req = "<?req version='1.0' encoding='ISO-8859-1'?>"
                req += "<recv_cmd>"
                req += "<command>deconfigure</command>"
                req += "</recv_cmd>"
                sock.send(req)
                recv_reply = sock.recv(65536)
                sock.close()

    # remove the data product
    self._data_product["id"] = "None"

    response = "data product " + str(data_product_id) + " deconfigured"
    self.script.log(1, "deconfigure: " + response)
    return ("ok", response)
def request_target_start(self, req, data_product_id, target_name):
    """Commence data processing on specific data product and beam using target.

    Snapshots the CAM-supplied parameters into the beam configuration,
    validates the target against the pulsar catalog and the ADC sync
    time, then sends the configuration and start commands to TCS.

    Returns a (status, message) tuple: ("ok", "") on success.

    FIX: the log line referenced the undefined name `beam_id`, which
    raised NameError on every call — it now logs self.script.beam_name
    (the attribute used throughout this file to identify the beam).
    """
    self.script.log(1, "request_target_start(" + data_product_id + "," +
                    target_name + ")")

    # copy the relevant CAM configuration into the beam configuration
    self.script.beam_config["lock"].acquire()
    self.script.beam_config["ADC_SYNC_TIME"] = self.script.cam_config["ADC_SYNC_TIME"]
    self.script.beam_config["OBSERVER"] = self.script.cam_config["OBSERVER"]
    self.script.beam_config["ANTENNAE"] = self.script.cam_config["ANTENNAE"]
    self.script.beam_config["SCHEDULE_BLOCK_ID"] = self.script.cam_config["SCHEDULE_BLOCK_ID"]
    self.script.beam_config["EXPERIMENT_ID"] = self.script.cam_config["EXPERIMENT_ID"]
    self.script.beam_config["DESCRIPTION"] = self.script.cam_config["DESCRIPTION"]
    self.script.beam_config["lock"].release()

    # check the pulsar specified is listed in the catalog
    (result, message) = self.test_pulsar_valid(target_name)
    if result != "ok":
        return (result, message)

    # check the ADC_SYNC_TIME is valid for this beam
    if self.script.beam_config["ADC_SYNC_TIME"] == "0":
        return ("fail", "ADC Synchronisation Time was not valid")

    # set the pulsar name; calibration observations carry the _R suffix
    self.script.beam_config["lock"].acquire()
    if self.script.beam_config["MODE"] == "CAL":
        target_name = target_name + "_R"
    self.script.beam_config["SOURCE"] = target_name
    self.script.beam_config["lock"].release()

    host = self.script.tcs_host
    port = self.script.tcs_port
    self.script.log(2, "request_target_start: opening socket for beam " +
                    self.script.beam_name + " to " + host + ":" + str(port))
    sock = sockets.openSocket(DL, host, int(port), 1)
    if sock:
        # send the observation configuration, then the start command
        xml = self.script.get_xml_config()
        sock.send(xml + "\r\n")
        reply = sock.recv(65536)

        xml = self.script.get_xml_start_cmd()
        sock.send(xml + "\r\n")
        reply = sock.recv(65536)

        sock.close()
        return ("ok", "")
    else:
        return ("fail", "could not connect to TCS")
def request_deconfigure(self, req, msg):
    """Deconfigure for the data_product."""
    # exactly one argument (the data product id) is required
    if len(msg.arguments) == 0:
        self.script.log(-1, "request_configure: no arguments provided")
        return ("fail", "expected 1 argument")

    # the sub-array identifier
    data_product_id = msg.arguments[0]
    self.script.log(1, "configure: deconfiguring " + str(data_product_id))

    # check if the data product was previously configured
    if data_product_id != self._data_product["id"]:
        response = (str(data_product_id) +
                    " did not match configured data product [" +
                    self._data_product["id"] + "]")
        self.script.log(-1, "configure: " + response)
        return ("fail", response)

    for istream in range(int(self.script.cfg["NUM_STREAM"])):
        (host, beam_idx, subband) = \
            self.script.cfg["STREAM_" + str(istream)].split(":")
        # only streams belonging to this beam are deconfigured
        if self.script.cfg["BEAM_" + beam_idx] != self.script.beam_name:
            continue

        # reset ADC_SYNC_TIME on the beam
        lock = self.script.beam_config["lock"]
        lock.acquire()
        self.script.beam_config["ADC_SYNC_TIME"] = "0"
        lock.release()

        port = int(self.script.cfg["STREAM_RECV_PORT"]) + istream
        self.script.log(3, "configure: connecting to " + host + ":" + str(port))
        sock = sockets.openSocket(DL, host, port, 1)
        if sock:
            req = ("<?req version='1.0' encoding='ISO-8859-1'?>"
                   "<recv_cmd>"
                   "<command>deconfigure</command>"
                   "</recv_cmd>")
            sock.send(req)
            recv_reply = sock.recv(65536)
            sock.close()

    # remove the data product
    self._data_product["id"] = "None"

    response = "data product " + str(data_product_id) + " deconfigured"
    self.script.log(1, "configure: " + response)
    return ("ok", response)
def getSMRBCapacity(stream_ids, quit_event, dl):
    """Query the SMRB monitoring port of each stream for its status.

    Returns (rval, smrbs) where smrbs maps stream id to the JSON-decoded
    status reply. Streams are skipped once quit_event is set.
    NOTE(review): rval is never updated and is always 0 — confirm that
    callers only use the smrbs dict.
    """
    smrbs = {}
    rval = 0
    for sid in stream_ids:
        if quit_event.isSet():
            continue
        mon_port = SMRBDaemon.getDBMonPort(sid)
        conn = sockets.openSocket(dl, "localhost", mon_port, 1)
        if conn:
            conn.send("smrb_status\n")
            reply = conn.recv(65536)
            smrbs[sid] = json.loads(reply)
            conn.close()
    return rval, smrbs
def connect(self, attempts=5):
    """Attempt to (re)connect the log-stream socket.

    A new connection attempt is made only if more than `attempts`
    seconds have elapsed since the previous one. Sets self.connected
    and records the attempt time in self.last_conn_attempt.
    NOTE(review): despite the name, `attempts` acts as a seconds
    threshold between attempts, not a retry count — confirm intent.
    """
    now = timegm(gmtime())
    if (now - self.last_conn_attempt) > attempts:
        # XML preamble identifying this log stream to the server
        header = (
            "<?xml version='1.0' encoding='ISO-8859-1'?>"
            "<log_stream>"
            "<host>" + self.host + "</host>"
            "<source>" + self.source + "</source>"
            "<dest>" + self.dest + "</dest>"
            "<id type='" + self.type + "'>" + self.id + "</id>"
            "</log_stream>"
        )
        self.sock = sockets.openSocket(self.dl, self.host, int(self.port), 1)
        if not self.sock:
            self.connected = False
        else:
            self.sock.send(header)
            junk = self.sock.recv(1)
            self.connected = True
        self.last_conn_attempt = now
def request_target_stop(self, req, data_product_id):
    """Cease data processing with target_name."""
    self.script.log(1, "request_target_stop(" + data_product_id + ")")

    # clear the source from the beam configuration
    self.script.beam_config["lock"].acquire()
    self.script.beam_config["SOURCE"] = ""
    self.script.beam_config["lock"].release()

    host = self.script.tcs_host
    port = self.script.tcs_port
    sock = sockets.openSocket(DL, host, int(port), 1)
    if not sock:
        return ("fail", "could not connect to tcs[beam]")

    # issue the stop command to TCS
    xml = self.script.get_xml_stop_cmd()
    sock.send(xml + "\r\n")
    reply = sock.recv(65536)
    sock.close()
    return ("ok", "")
def getSMRBCapacity(stream_ids, quit_event, dl):
    """Query the SMRB monitoring port of each stream for its status.

    Returns (rval, smrbs) where smrbs maps stream id to the JSON-decoded
    status reply; streams are skipped once quit_event is set. rval is
    always 0, matching the sibling implementation in this file.

    FIX: replaced Python-2-only `except socket.error, e` / `print`
    statement with portable syntax; added the missing return (the
    sibling getSMRBCapacity returns `rval, smrbs`, this variant fell
    off the end returning None); socket is now closed on the error
    path as well.
    """
    smrbs = {}
    rval = 0
    for stream_id in stream_ids:
        if quit_event.isSet():
            continue
        port = SMRBDaemon.getDBMonPort(stream_id)
        sock = sockets.openSocket(dl, "localhost", port, 1)
        if sock:
            try:
                sock.settimeout(1)
                sock.send("smrb_status\n")
                data = sock.recv(65536)
            except socket.error as e:
                print("socket connection to SMRB failed")
            else:
                smrbs[stream_id] = json.loads(data)
            # close whether or not the status query succeeded
            sock.close()
    return rval, smrbs
def issue_stop_cmd(self, xml):
    """Send a stop command to spip_tcs, then reset the beam configuration.

    NOTE(review): this reads the module-level `script` object rather
    than self.script — confirm intentional. The `xml` argument is
    ignored and immediately overwritten.
    """
    self.log(2, "issue_stop_cmd()")
    script.beam_cfg["obs_cmd"]["command"] = "stop"
    # convert dict into XML to send to spip_tcs
    xml = xmltodict.unparse(script.beam_cfg)
    sock = sockets.openSocket(DL, self.host, self.spip_tcs_port, 1)
    if not sock:
        self.log(
            1,
            "TCSInterfaceDaemon::issue_stop_cmd could not conenct to spip_tcs"
        )
    else:
        sock.send(xml + "\r\n")
        sock.close()
    # reset the XML configuration to a default value
    self.configure_beam_state()
    time.sleep(1)
    return "ok"
def waitForSMRB(self, db_key, script):
    """Poll the SMRB monitoring port until it responds or ~60s elapse.

    Returns True once the ring buffer answers a status query, False if
    60 one-second attempts pass or the script's quit event is set.
    """
    # port of the SMRB daemon for this stream
    mon_port = SMRBDaemon.getDBMonPort(script.id)

    remaining = 60
    exists = False
    while not exists and remaining > 0 and not script.quit_event.isSet():
        script.debug("trying to open connection to SMRB")
        conn = sockets.openSocket(DL, "localhost", mon_port, 1)
        if not conn:
            # SMRB not up yet — back off and retry
            sleep(1)
            remaining -= 1
        else:
            conn.send("smrb_status\r\n")
            junk = conn.recv(65536)
            conn.close()
            exists = True
    return exists
def waitForSMRB(self, db_key, script):
    """Wait for the stream's SMRB to become reachable.

    Tries once per second, up to 60 times, stopping early on quit.
    Returns True if a status query to the monitor port succeeded.
    """
    # port of the SMRB daemon for this stream
    monitor_port = SMRBDaemon.getDBMonPort(script.id)

    tries_left = 60
    responded = False
    while not responded and tries_left > 0 and not script.quit_event.isSet():
        script.debug("trying to open connection to SMRB")
        s = sockets.openSocket(DL, "localhost", monitor_port, 1)
        if s:
            s.send("smrb_status\r\n")
            junk = s.recv(65536)
            s.close()
            responded = True
        else:
            sleep(1)
            tries_left -= 1
    return responded
def target_stop(self):
    """Stop the current observation via TCS and reset the beam config.

    Returns a (status, message) tuple; fails if no data product is
    configured, the state change is rejected, or TCS is unreachable.
    """
    if self._data_product["id"] == "None":
        return ("fail", "data product was not configured")

    # change the state
    (result, message) = self.change_state("target_stop")
    if result != "ok":
        self.script.log(-1, "target_stop: change_state failed: " + message)
        return (result, message)

    self.script.reset_beam_config()

    sock = sockets.openSocket(DL, self.script.tcs_host,
                              int(self.script.tcs_port), 1)
    if not sock:
        return ("fail", "could not connect to tcs[beam]")

    # issue the stop command
    sock.send(self.script.get_xml_stop_cmd() + "\r\n")
    reply = sock.recv(65536)
    sock.close()
    return ("ok", "")
def target_stop(self):
    """Stop the current observation through TCS, resetting beam config.

    Returns ("ok", "") on success, or ("fail", reason) when no data
    product is configured, the state change fails, or TCS cannot be
    reached.
    """
    if self._data_product["id"] == "None":
        return ("fail", "data product was not configured")

    # change the state
    (result, message) = self.change_state("target_stop")
    if result != "ok":
        self.script.log(-1, "target_stop: change_state failed: " + message)
        return (result, message)

    self.script.reset_beam_config()

    tcs_host = self.script.tcs_host
    tcs_port = self.script.tcs_port
    sock = sockets.openSocket(DL, tcs_host, int(tcs_port), 1)
    if sock:
        stop_xml = self.script.get_xml_stop_cmd()
        sock.send(stop_xml + "\r\n")
        reply = sock.recv(65536)
        sock.close()
        return ("ok", "")
    else:
        return ("fail", "could not connect to tcs[beam]")
def main(self, id):
    """Periodic sensor-update loop for the KATCP daemon.

    Polls the LMC and REPACK daemons over their socket interfaces and
    pushes the parsed XML replies into the KATCP DeviceServer sensors,
    looping until quit_event is set or the KATCP server stops running.

    id -- stream identifier (unused in this body; NOTE(review): shadows
          the builtin `id` — presumably required by the daemon main()
          calling convention, confirm against caller).
    """
    # connect to the various scripts running to collect the information
    # to be provided by the KATCPServer instance as sensors
    time.sleep(2)
    while not self.quit_event.isSet():
        self.log(3, "KATCPDaemon::main loop start")

        # the primary function of the KATCPDaemon is to update the
        # sensors in the DeviceServer periodically
        # TODO compute overall device status
        self.katcp._device_status.set_value("ok")

        # configure fixed sensors
        self.katcp._beam_sensors["input_channels"].set_value(
            self.input_nchan)

        # connect to SPIP_LMC to retreive temperature information
        if self.quit_event.isSet():
            self.log(
                2, "KATCPDaemon::main quit_event was set, exiting main 1")
            return
        (host, port) = self.lmc.split(":")
        self.log(3, "KATCPDaemon::main updating sensors from LMC")
        try:
            self.log(
                3, "KATCPDaemon::main openSocket(" + host + "," + port + ")")
            sock = sockets.openSocket(DL, host, int(port), 1)
            self.log(3, "KATCPDaemon::main socket opened")
            if sock:
                sock.settimeout(1.0)
                sock.send(self.lmc_cmd)
                lmc_reply = sock.recv(65536)
                xml = xmltodict.parse(lmc_reply)
                sock.close()
                if self.quit_event.isSet():
                    self.log(
                        2,
                        "KATCPDaemon::main quit_event was set, exiting main 2"
                    )
                    return
                self.log(
                    3,
                    "KATCPDaemon::main update_lmc_sensors(" + host + ",[xml])")
                self.update_lmc_sensors(host, xml)
        except socket.error as e:
            # NOTE(review): if openSocket raised before assignment, `sock`
            # is unbound here — confirm openSocket returns rather than raises
            self.log(2, "KATCPDaemon::main socket error on LMC sensor read")
            if e.errno == errno.ECONNRESET:
                self.log(1, "lmc connection was unexpectedly closed")
                sock.close()
        except:
            # bare except keeps the monitoring loop alive on any failure
            self.log(
                2, "KATCPDaemon::main other exception on LMC sensor read")
            sock.close()
        self.log(3, "KATCPDaemon::main received LMC data")

        # connect to SPIP_REPACK to retrieve Pulsar SNR performance
        if self.quit_event.isSet():
            return
        self.log(3, "KATCPDaemon::main pulsar SNR sensor from REPACK")
        (host, port) = self.repack.split(":")
        try:
            sock = sockets.openSocket(DL, host, int(port), 1)
            if sock:
                sock.send(self.repack_cmd)
                repack_reply = sock.recv(65536)
                xml = xmltodict.parse(repack_reply)
                sock.close()
                if self.quit_event.isSet():
                    return
                self.log(
                    3,
                    "KATCPDaemon::main update_repack_sensors(" + host +
                    ",[xml])")
                self.update_repack_sensors(host, xml)
        except socket.error as e:
            if e.errno == errno.ECONNRESET:
                self.log(1, "repack connection was unexpectedly closed")
                sock.close()
        except:
            self.log(
                2, "KATCPDaemon::main other exception on repack sensor read")
            sock.close()

        # connect to STAT (TBD) to retrieve beam-former power levels
        host = "None"
        xml = "None"
        self.update_stat_sensors(host, xml)

        # sleep in 1s slices so quit_event is honoured promptly
        self.log(3, "KATCPDaemon::main sleeping for 5 seconds")
        to_sleep = 5
        while not self.quit_event.isSet() and to_sleep > 0:
            to_sleep -= 1
            time.sleep(1)

        # if the KATCP device server died, shut the whole daemon down
        if not self.katcp.running():
            self.log(-2, "KATCP server was not running, exiting")
            self.quit_event.set()
def setup_sensors_host(self, host, port):
    """Create KATCP sensors for one host from its LMC status reply.

    Connects to the LMC daemon at host:port, requests its status XML,
    then registers disk, load, temperature, fan and power-supply
    sensors on this server, keeping them in self._host_sensors.
    Logs an error if the LMC cannot be contacted.
    """
    self.script.log(2, "KATCPServer::setup_sensors_host (" + host + "," +
                    port + ")")
    sock = sockets.openSocket(DL, host, int(port), 1)
    if sock:
        self.script.log(2, "KATCPServer::setup_sensors_host sock.send(" +
                        self.script.lmc_cmd + ")")
        sock.send(self.script.lmc_cmd + "\r\n")
        lmc_reply = sock.recv(65536)
        sock.close()
        xml = xmltodict.parse(lmc_reply)

        self._host_sensors = {}

        # Disk sensors
        self.script.log(
            2, "KATCPServer::setup_sensors_host configuring disk sensors")
        disk_prefix = host + ".disk"
        self._host_sensors["disk_size"] = Sensor.float(
            disk_prefix + ".size",
            description=host + ": disk size",
            unit="MB",
            params=[8192, 1e9],
            default=0)
        self._host_sensors["disk_available"] = Sensor.float(
            disk_prefix + ".available",
            description=host + ": disk available space",
            unit="MB",
            params=[1024, 1e9],
            default=0)
        self.add_sensor(self._host_sensors["disk_size"])
        self.add_sensor(self._host_sensors["disk_available"])

        # Server Load sensors
        self.script.log(
            2, "KATCPServer::setup_sensors_host configuring load sensors")
        # NOTE(review): description/unit below look copy-pasted from the
        # disk sensor ("disk available space", "MB") — confirm intended
        self._host_sensors["num_cores"] = Sensor.integer(
            host + ".num_cores",
            description=host + ": disk available space",
            unit="MB",
            params=[1, 64],
            default=0)
        self._host_sensors["load1"] = Sensor.float(
            host + ".load.1min",
            description=host + ": 1 minute load ",
            unit="",
            default=0)
        self._host_sensors["load5"] = Sensor.float(
            host + ".load.5min",
            description=host + ": 5 minute load ",
            unit="",
            default=0)
        self._host_sensors["load15"] = Sensor.float(
            host + ".load.15min",
            description=host + ": 15 minute load ",
            unit="",
            default=0)
        self.add_sensor(self._host_sensors["num_cores"])
        self.add_sensor(self._host_sensors["load1"])
        self.add_sensor(self._host_sensors["load5"])
        self.add_sensor(self._host_sensors["load15"])

        # patterns matching per-metric sensor names in the LMC reply
        cpu_temp_pattern = re.compile("cpu[0-9]+_temp")
        fan_speed_pattern = re.compile("fan[0-9,a-z]+")
        power_supply_pattern = re.compile("ps[0-9]+_status")

        self.script.log(
            2, "KATCPServer::setup_sensors_host configuring other metrics")
        if not xml["lmc_reply"]["sensors"] == None:
            # NOTE(review): assumes "metric" parses as a list — with
            # xmltodict a single metric element yields a dict; confirm
            # the LMC always reports more than one metric
            for sensor in xml["lmc_reply"]["sensors"]["metric"]:
                name = sensor["@name"]
                if name == "system_temp":
                    self._host_sensors[name] = Sensor.float(
                        (host + ".system_temp"),
                        description=host + ": system temperature",
                        unit="C",
                        params=[-20, 150],
                        default=0)
                    self.add_sensor(self._host_sensors[name])
                if cpu_temp_pattern.match(name):
                    (cpu, junk) = name.split("_")
                    self._host_sensors[name] = Sensor.float(
                        (host + "." + name),
                        description=host + ": " + cpu + " temperature",
                        unit="C",
                        params=[-20, 150],
                        default=0)
                    self.add_sensor(self._host_sensors[name])
                if fan_speed_pattern.match(name):
                    self._host_sensors[name] = Sensor.float(
                        (host + "." + name),
                        description=host + ": " + name + " speed",
                        unit="RPM",
                        params=[0, 20000],
                        default=0)
                    self.add_sensor(self._host_sensors[name])
                if power_supply_pattern.match(name):
                    self._host_sensors[name] = Sensor.boolean(
                        (host + "." + name),
                        description=host + ": " + name,
                        unit="",
                        default=0)
                    self.add_sensor(self._host_sensors[name])

            # TODO consider adding power supply sensors: e.g.
            #   device-status-kronos1-powersupply1
            #   device-status-kronos1-powersupply2
            #   device-status-kronos2-powersupply1
            #   device-status-kronos2-powersupply2

            # TODO consider adding raid/disk sensors: e.g.
            #   device-status-<host>-raid
            #   device-status-<host>-raid-disk1
            #   device-status-<host>-raid-disk2

            self.script.log(2, "KATCPServer::setup_sensors_host done!")
        else:
            self.script.log(
                2, "KATCPServer::setup_sensors_host no sensors found")
    else:
        self.script.log(
            -2,
            "KATCPServer::setup_sensors_host: could not connect to LMC")
def request_configure(self, req, msg):
    """Prepare and configure for the reception of the data_product_id.

    Dispatch on argument count:
      0 args -- report configured products (placeholder reply)
      1 arg  -- query the stored configuration for that data product
      4 args -- (id, antennas, n_channels, cbf_source): configure the
                sub-array, validate channel count, parse the CBF source
                multicast address and push the configuration to every
                RECV stream serving this beam
    Returns a (status, message) KATCP-style tuple.
    """
    self.script.log(1, "request_configure: nargs= " +
                    str(len(msg.arguments)) + " msg=" + str(msg))
    if len(msg.arguments) == 0:
        self.script.log(-1, "request_configure: no arguments provided")
        return ("ok", "configured data products: TBD")

    # the sub-array identifier
    data_product_id = msg.arguments[0]

    if len(msg.arguments) == 1:
        # query-only: echo the existing configuration, if it matches
        self.script.log(1, "request_configure: request for configuration of " +
                        str(data_product_id))
        if data_product_id == self._data_product["id"]:
            configuration = str(data_product_id) + " " + \
                str(self._data_product['antennas']) + " " + \
                str(self._data_product['n_channels']) + " " + \
                str(self._data_product['cbf_source'])
            self.script.log(1, "request_configure: configuration of " +
                            str(data_product_id) + "=" + configuration)
            return ("ok", configuration)
        else:
            self.script.log(-1, "request_configure: no configuration existed for " +
                            str(data_product_id))
            return ("fail", "no configuration existed for " +
                    str(data_product_id))

    if len(msg.arguments) == 4:
        # if the configuration for the specified data product matches extactly the
        # previous specification for that data product, then no action is required
        self.script.log(1, "configure: configuring " + str(data_product_id))
        if data_product_id == self._data_product["id"] and \
           self._data_product['antennas'] == msg.arguments[1] and \
           self._data_product['n_channels'] == msg.arguments[2] and \
           self._data_product['cbf_source'] == msg.arguments[3]:
            response = "configuration for " + str(data_product_id) + \
                " matched previous"
            self.script.log(1, "configure: " + response)
            return ("ok", response)
        # the data product requires configuration
        else:
            self.script.log(1, "configure: new data product " + data_product_id)

            # determine which sub-array we are matched against
            the_sub_array = -1
            for i in range(4):
                self.script.log(1, "configure: testing self.data_product_res[" +
                                str(i) + "].match(" + data_product_id + ")")
                if self.data_product_res[i].match(data_product_id):
                    the_sub_array = i + 1
            if the_sub_array == -1:
                self.script.log(1, "configure: could not match subarray from " +
                                data_product_id)
                return ("fail", "could not data product to sub array")

            # point the CAM pub/sub client at the matched sub-array
            self.script.log(1, "configure: restarting pubsub for subarray " +
                            str(the_sub_array))
            self.script.pubsub.set_sub_array(the_sub_array, self.script.beam_name)
            self.script.pubsub.restart()

            antennas = msg.arguments[1]
            n_channels = msg.arguments[2]
            cbf_source = msg.arguments[3]

            # check if the number of existing + new beams > available
            # (string comparison: nchan from cfg vs n_channels from CAM)
            (cfreq, bwd, nchan) = self.script.cfg["SUBBAND_CONFIG_0"].split(":")
            if nchan != n_channels:
                # NOTE(review): pop() keyed by data_product_id on a dict
                # keyed by 'id'/'antennas'/... — likely a no-op, confirm
                self._data_product.pop(data_product_id, None)
                response = "PTUSE configured for " + nchan + " channels"
                self.script.log(-1, "configure: " + response)
                return ("fail", response)

            self._data_product['id'] = data_product_id
            self._data_product['antennas'] = antennas
            self._data_product['n_channels'] = n_channels
            self._data_product['cbf_source'] = cbf_source

            # parse the CBF_SOURCE to determine multicast groups
            (addr, port) = cbf_source.split(":")
            (mcast, count) = addr.split("+")
            self.script.log(2, "configure: parsed " + mcast + "+" + count +
                            ":" + port)
            if not count == "1":
                response = "CBF source did not match ip_address+1:port"
                self.script.log(-1, "configure: " + response)
                return ("fail", response)

            # two consecutive multicast groups: base address and base+1
            mcasts = ["", ""]
            ports = [0, 0]
            quartets = mcast.split(".")
            mcasts[0] = ".".join(quartets)
            quartets[3] = str(int(quartets[3]) + 1)
            mcasts[1] = ".".join(quartets)
            ports[0] = int(port)
            ports[1] = int(port)

            self.script.log(1, "configure: connecting to RECV instance to " +
                            "update configuration")
            for istream in range(int(self.script.cfg["NUM_STREAM"])):
                (host, beam_idx, subband) = \
                    self.script.cfg["STREAM_" + str(istream)].split(":")
                beam = self.script.cfg["BEAM_" + beam_idx]
                if beam == self.script.beam_name:
                    # reset ADC_SYNC_TIME on the beam
                    self.script.beam_config["lock"].acquire()
                    self.script.beam_config["ADC_SYNC_TIME"] = "0";
                    self.script.beam_config["lock"].release()

                    # per-stream control port, offset by stream index
                    port = int(self.script.cfg["STREAM_RECV_PORT"]) + istream
                    self.script.log(3, "configure: connecting to " + host +
                                    ":" + str(port))
                    sock = sockets.openSocket(DL, host, port, 1)
                    if sock:
                        req = "<?req version='1.0' encoding='ISO-8859-1'?>"
                        req += "<recv_cmd>"
                        req += "<command>configure</command>"
                        req += "<params>"
                        req += "<param key='DATA_MCAST_0'>" + mcasts[0] + "</param>"
                        req += "<param key='DATA_MCAST_1'>" + mcasts[1] + "</param>"
                        req += "<param key='DATA_PORT_0'>" + str(ports[0]) + "</param>"
                        req += "<param key='DATA_PORT_1'>" + str(ports[1]) + "</param>"
                        req += "<param key='META_MCAST_0'>" + mcasts[0] + "</param>"
                        req += "<param key='META_MCAST_1'>" + mcasts[1] + "</param>"
                        req += "<param key='META_PORT_0'>" + str(ports[0]) + "</param>"
                        req += "<param key='META_PORT_1'>" + str(ports[1]) + "</param>"
                        req += "</params>"
                        req += "</recv_cmd>"

                        self.script.log(1, "configure: sending XML req")
                        sock.send(req)
                        recv_reply = sock.recv(65536)
                        self.script.log(1, "configure: received " + recv_reply)
                        sock.close()

            return ("ok", "data product " + str(data_product_id) + " configured")
    else:
        response = "expected 0, 1 or 4 arguments"
        self.script.log(-1, "configure: " + response)
        return ("fail", response)
def request_configure(self, req, msg):
    """Prepare and configure for the reception of the data_product_id.

    Dispatch on argument count:
      0 args -- report configured products (placeholder reply)
      1 arg  -- query the stored configuration for that data product
      5 args -- (id, antennas, n_channels, streams-json, proxy_name):
                configure the sub-array, update the CAM pub/sub client
                from the streams dict, and push the tied-array multicast
                configuration to every RECV stream serving this beam
    Returns a (status, message) KATCP-style tuple.
    """
    self.script.log(
        1, "request_configure: nargs= " + str(len(msg.arguments)) +
        " msg=" + str(msg))
    if len(msg.arguments) == 0:
        self.script.log(-1, "request_configure: no arguments provided")
        return ("ok", "configured data products: TBD")

    # the sub-array identifier
    data_product_id = msg.arguments[0]

    if len(msg.arguments) == 1:
        # query-only: echo the existing configuration, if it matches
        self.script.log(
            1, "request_configure: request for configuration of " +
            str(data_product_id))
        if data_product_id == self._data_product["id"]:
            configuration = str(data_product_id) + " " + \
                str(self._data_product['antennas']) + " " + \
                str(self._data_product['n_channels']) + " " + \
                str(self._data_product['cbf_source']) + " " + \
                str(self._data_product['proxy_name'])
            self.script.log(
                1, "request_configure: configuration of " +
                str(data_product_id) + "=" + configuration)
            return ("ok", configuration)
        else:
            self.script.log(
                -1, "request_configure: no configuration existed for " +
                str(data_product_id))
            return ("fail", "no configuration existed for " +
                    str(data_product_id))

    if len(msg.arguments) == 5:
        # if the configuration for the specified data product matches extactly the
        # previous specification for that data product, then no action is required
        self.script.log(1, "configure: configuring " + str(data_product_id))
        if data_product_id == self._data_product["id"] and \
           self._data_product['antennas'] == msg.arguments[1] and \
           self._data_product['n_channels'] == msg.arguments[2] and \
           self._data_product['cbf_source'] == str(msg.arguments[3]) and \
           self._data_product['proxy_name'] == str(msg.arguments[4]):
            response = "configuration for " + str(
                data_product_id) + " matched previous"
            self.script.log(1, "configure: " + response)
            return ("ok", response)
        # the data product requires configuration
        else:
            self.script.log(
                1, "configure: new data product " + data_product_id)
            # TODO decide what to do regarding preconfigured params
            # (e.g. FREQ, BW) vs CAM supplied values

            # determine which sub-array we are matched against
            the_sub_array = -1
            for i in range(4):
                self.script.log(
                    1, "configure: testing self.data_product_res[" + str(i) +
                    "].match(" + data_product_id + ")")
                if self.data_product_res[i].match(data_product_id):
                    the_sub_array = i + 1
            if the_sub_array == -1:
                self.script.log(
                    1, "configure: could not match subarray from " +
                    data_product_id)
                return ("fail", "could not data product to sub array")

            antennas = msg.arguments[1]
            n_channels = msg.arguments[2]
            cbf_source = str(msg.arguments[3])
            # argument 3 doubles as a JSON dict of stream descriptions
            streams = json.loads(msg.arguments[3])
            proxy_name = str(msg.arguments[4])
            self.script.log(2, "configure: streams=" + str(streams))

            # check if the number of existing + new beams > available
            # (cfreq, bwd, nchan1) = self.script.cfg["SUBBAND_CONFIG_0"].split(":")
            # (cfreq, bwd, nchan2) = self.script.cfg["SUBBAND_CONFIG_1"].split(":")
            # nchan = int(nchan1) + int(nchan2)
            # if nchan != int(n_channels):
            #     self._data_product.pop(data_product_id, None)
            #     response = "PTUSE configured for " + str(nchan) + " channels"
            #     self.script.log (-1, "configure: " + response)
            #     return ("fail", response)

            self._data_product['id'] = data_product_id
            self._data_product['antennas'] = antennas
            self._data_product['n_channels'] = n_channels
            self._data_product['cbf_source'] = cbf_source
            self._data_product['streams'] = str(streams)
            self._data_product['proxy_name'] = proxy_name
            self._data_product['state'] = "unconfigured"

            # change the state
            (result, message) = self.change_state("configure")
            if result != "ok":
                self.script.log(
                    -1, "configure: change_state failed: " + message)
                return (result, message)

            # determine the CAM metadata server and update pubsub
            cam_server = "None"
            fengine_stream = "None"
            polh_stream = "None"
            polv_stream = "None"
            self.script.log(
                2, "configure: streams.keys()=" + str(streams.keys()))
            self.script.log(
                2, "configure: streams['cam.http'].keys()=" +
                str(streams['cam.http'].keys()))
            if 'cam.http' in streams.keys(
            ) and 'camdata' in streams['cam.http'].keys():
                cam_server = streams['cam.http']['camdata']
                self.script.log(2, "configure: cam_server=" + str(cam_server))
            if 'cbf.antenna_channelised_voltage' in streams.keys():
                # NOTE(review): dict.keys()[0] requires Python 2 — under
                # Python 3 this raises TypeError; confirm runtime version
                stream_name = streams[
                    'cbf.antenna_channelised_voltage'].keys()[0]
                fengine_stream = stream_name.split(".")[0]
                self.script.log(
                    2, "configure: fengine_stream=" + str(fengine_stream))
            if 'cbf.tied_array_channelised_voltage' in streams.keys():
                for s in streams[
                        'cbf.tied_array_channelised_voltage'].keys():
                    if s.endswith('y'):
                        polv_stream = s
                    if s.endswith('x'):
                        polh_stream = s
                self.script.log(
                    2, "configure: polh_stream=" + str(polh_stream) +
                    " polv_stream=" + str(polv_stream))

            if cam_server != "None" and fengine_stream != "None" and polh_stream != "None":
                self.script.pubsub.update_cam(cam_server, fengine_stream,
                                              polh_stream, polv_stream,
                                              antennas)
            else:
                response = "Could not extract streams[cam.http][camdata]"
                self.script.log(1, "configure: cam_server=" + cam_server)
                self.script.log(
                    1, "configure: fengine_stream=" + fengine_stream)
                self.script.log(1, "configure: polh_stream=" + polh_stream)
                self.script.log(-1, "configure: " + response)
                return ("fail", response)

            # restart the pubsub service
            self.script.log(
                1, "configure: restarting pubsub for new meta-data")
            self.script.pubsub.restart()

            # determine the X and Y tied array channelised voltage streams
            mcasts = {}
            ports = {}
            key = 'cbf.tied_array_channelised_voltage'
            if key in streams.keys():
                stream = 'i0.tied-array-channelised-voltage.0x'
                if stream in streams[key].keys():
                    (mcast, port) = self.parseStreamAddress(
                        streams[key][stream])
                    mcasts['x'] = mcast
                    ports['x'] = int(port)
                else:
                    response = "Could not extract streams[" + key + "][" + \
                        stream + "]"
                    self.script.log(-1, "configure: " + response)
                    return ("fail", response)

                stream = 'i0.tied-array-channelised-voltage.0y'
                if stream in streams[key].keys():
                    (mcast, port) = self.parseStreamAddress(
                        streams[key][stream])
                    mcasts['y'] = mcast
                    ports['y'] = int(port)
                else:
                    response = "Could not extract streams[" + key + "][" + \
                        stream + "]"
                    self.script.log(-1, "configure: " + response)
                    return ("fail", response)

            # if the backend nchan is < CAM nchan
            self.script.log(1, "configure: n_channels=" + str(n_channels))
            # NOTE(review): `and False` permanently disables this branch;
            # `/` below is integer division only under Python 2
            if int(n_channels) == 1024 and False:
                nchan = 992
                self.script.log(
                    1, "configure: reconfiguring MCAST groups from " +
                    mcasts['x'] + ", " + mcasts['y'])
                (mcast_base_x, mcast_ngroups_x) = mcasts['x'].split("+")
                (mcast_base_y, mcast_ngroups_y) = mcasts['y'].split("+")
                nchan_per_group = int(n_channels) / int(mcast_ngroups_x)
                new_ngroups = nchan / nchan_per_group
                offset = (int(mcast_ngroups_x) - new_ngroups) / 2
                self.script.log(
                    1, "configure: nchan_per_group=" + str(nchan_per_group) +
                    " new_ngroups=" + str(new_ngroups) + " offset=" +
                    str(offset))
                parts_x = mcast_base_x.split(".")
                parts_y = mcast_base_y.split(".")
                parts_x[3] = str(int(parts_x[3]) + offset)
                parts_y[3] = str(int(parts_y[3]) + offset)
                mcasts['x'] = ".".join(parts_x) + "+" + str(new_ngroups)
                mcasts['y'] = ".".join(parts_y) + "+" + str(new_ngroups)
                self.script.log(
                    1, "configure: reconfigured MCAST groups to " +
                    mcasts['x'] + ", " + mcasts['y'])

            self.script.log(
                1,
                "configure: connecting to RECV instance to update configuration"
            )
            for istream in range(int(self.script.cfg["NUM_STREAM"])):
                (host, beam_idx, subband) = self.script.cfg[
                    "STREAM_" + str(istream)].split(":")
                beam = self.script.cfg["BEAM_" + beam_idx]
                self.script.log(
                    1, "configure: istream=" + str(istream) + " beam=" +
                    beam + " script.beam_name=" + self.script.beam_name)
                if beam == self.script.beam_name:
                    # reset ADC_SYNC_TIME on the beam
                    self.script.beam_config["lock"].acquire()
                    self.script.beam_config["ADC_SYNC_TIME"] = "0"
                    self.script.beam_config["lock"].release()

                    # per-stream control port, offset by stream index
                    port = int(
                        self.script.cfg["STREAM_RECV_PORT"]) + istream
                    self.script.log(
                        1, "configure: connecting to " + host + ":" +
                        str(port))
                    sock = sockets.openSocket(DL, host, port, 1)
                    if sock:
                        req = "<?req version='1.0' encoding='ISO-8859-1'?>"
                        req += "<recv_cmd>"
                        req += "<command>configure</command>"
                        req += "<params>"
                        req += "<param key='DATA_MCAST_0'>" + mcasts[
                            'x'] + "</param>"
                        req += "<param key='DATA_PORT_0'>" + str(
                            ports['x']) + "</param>"
                        req += "<param key='META_MCAST_0'>" + mcasts[
                            'x'] + "</param>"
                        req += "<param key='META_PORT_0'>" + str(
                            ports['x']) + "</param>"
                        req += "<param key='DATA_MCAST_1'>" + mcasts[
                            'y'] + "</param>"
                        req += "<param key='DATA_PORT_1'>" + str(
                            ports['y']) + "</param>"
                        req += "<param key='META_MCAST_1'>" + mcasts[
                            'y'] + "</param>"
                        req += "<param key='META_PORT_1'>" + str(
                            ports['y']) + "</param>"
                        req += "</params>"
                        req += "</recv_cmd>"

                        self.script.log(
                            1, "configure: sending XML req [" + req + "]")
                        sock.send(req)
                        self.script.log(
                            1, "configure: send XML, receiving reply")
                        recv_reply = sock.recv(65536)
                        self.script.log(
                            1, "configure: received " + recv_reply)
                        sock.close()
                    else:
                        response = "configure: could not connect to stream " + str(
                            istream) + " at " + host + ":" + str(port)
                        self.script.log(-1, "configure: " + response)
                        return ("fail", response)

            return ("ok",
                    "data product " + str(data_product_id) + " configured")
    else:
        response = "expected 0, 1 or 5 arguments, received " + str(
            len(msg.arguments))
        self.script.log(-1, "configure: " + response)
        return ("fail", response)
def request_target_start (self, req, target_name):
    """Commence data processing using target.

    Copies the CAM-supplied observation metadata into the beam
    configuration, validates the source (for fold mode) and the ADC
    synchronisation time, then instructs TCS to configure and start.

    req -- KATCP request object (unused here beyond the API signature)
    target_name -- name of the target/pulsar to observe
    Returns a ("ok"|"fail", message) tuple per the KATCP convention.
    """
    self.script.log (1, "request_target_start(" + target_name+")")
    # a data product must have been configured before a target can start
    if self._data_product["id"] == "None":
        return ("fail", "data product was not configured")
    self.script.log (1, "request_target_start ADC_SYNC_TIME=" + self.script.cam_config["ADC_SYNC_TIME"])
    # copy the CAM-supplied metadata into the beam configuration under lock
    self.script.beam_config["lock"].acquire()
    self.script.beam_config["TARGET"] = self.script.cam_config["TARGET"]
    # only adopt the CAM ADC_SYNC_TIME when it carries a real value
    if self.script.cam_config["ADC_SYNC_TIME"] != "0":
        self.script.beam_config["ADC_SYNC_TIME"] = self.script.cam_config["ADC_SYNC_TIME"]
    self.script.beam_config["NCHAN_PER_STREAM"] = self.script.cam_config["NCHAN_PER_STREAM"]
    self.script.beam_config["PRECISETIME_FRACTION_POLV"] = self.script.cam_config["PRECISETIME_FRACTION_POLV"]
    self.script.beam_config["PRECISETIME_FRACTION_POLH"] = self.script.cam_config["PRECISETIME_FRACTION_POLH"]
    self.script.beam_config["PRECISETIME_UNCERTAINTY_POLV"] = self.script.cam_config["PRECISETIME_UNCERTAINTY_POLV"]
    self.script.beam_config["PRECISETIME_UNCERTAINTY_POLH"] = self.script.cam_config["PRECISETIME_UNCERTAINTY_POLH"]
    self.script.beam_config["TFR_KTT_GNSS"] = self.script.cam_config["TFR_KTT_GNSS"]
    self.script.beam_config["ITRF"] = self.script.cam_config["ITRF"]
    self.script.beam_config["OBSERVER"] = self.script.cam_config["OBSERVER"]
    self.script.beam_config["ANTENNAE"] = self.script.cam_config["ANTENNAE"]
    self.script.beam_config["SCHEDULE_BLOCK_ID"] = self.script.cam_config["SCHEDULE_BLOCK_ID"]
    self.script.beam_config["PROPOSAL_ID"] = self.script.cam_config["PROPOSAL_ID"]
    self.script.beam_config["EXPERIMENT_ID"] = self.script.cam_config["EXPERIMENT_ID"]
    self.script.beam_config["DESCRIPTION"] = self.script.cam_config["DESCRIPTION"]
    self.script.beam_config["RA"] = self.script.cam_config["RA"]
    self.script.beam_config["DEC"] = self.script.cam_config["DEC"]
    self.script.beam_config["POLH_WEIGHTS"] = self.script.cam_config["POLH_WEIGHTS"]
    self.script.beam_config["POLV_WEIGHTS"] = self.script.cam_config["POLV_WEIGHTS"]
    self.script.beam_config["CBF_INPUTS"] = self.script.cam_config["CBF_INPUTS"]
    self.script.beam_config["SIDEBAND"] = self.script.cam_config["SIDEBAND"]
    self.script.beam_config["lock"].release()
    # fold (pulsar timing) mode requires the source to exist in the catalog
    fold_mode = self.script.beam_config["PERFORM_FOLD"] == "1"
    ra = self.script.beam_config["RA"]
    dec = self.script.beam_config["DEC"]
    # strip spaces so the name is usable as a catalog key / header value
    source = target_name.replace(" ", "")
    self.script.log (1, "fold_mode=" + str(fold_mode) + " source=" + source + " RA=" + ra + " DEC=" + dec)
    if fold_mode:
        # check the pulsar specified is listed in the catalog
        (result, message) = self.test_pulsar_valid (source)
        self.script.log (1, source + " pulsar validity was " + str(result) + ": " + message)
        if result != "ok":
            return (result, message)
    # convert the supplied RA/DEC in hours to HHMMSS/DDMMSS
    if ra != "None":
        hhmmss = coordinates.convert_hours_to_hhmmss(float(ra))
        self.script.log (2, "converted " + ra + " to " + hhmmss)
        self.script.beam_config["RA"] = hhmmss
    else:
        # no RA supplied: in fold mode fall back to the catalog value
        if fold_mode:
            (reply, message) = catalog.get_pulsar_param (source, "raj")
            self.script.log (2, "catalog raj=" + message)
            if reply:
                self.script.beam_config["RA"] = message
            else:
                self.script.log (1, message)
    if dec != "None":
        ddmmss = coordinates.convert_degrees_to_ddmmss(float(dec))
        self.script.log (2, "converted " + dec + " to " + ddmmss)
        self.script.beam_config["DEC"] = ddmmss
    else:
        # no DEC supplied: in fold mode fall back to the catalog value
        if fold_mode:
            (reply, message) = catalog.get_pulsar_param (source, "decj")
            self.script.log (2, "catalog decj=" + message)
            if reply:
                self.script.beam_config["DEC"] = message
            else:
                self.script.log (1, message)
    # check the ADC_SYNC_TIME is valid for this beam
    if self.script.beam_config["ADC_SYNC_TIME"] == "0":
        return ("fail", "ADC Synchronisation Time was not valid")
    # change the state
    (result, message) = self.change_state ("target_start")
    if result != "ok":
        self.script.log (-1, "target_start: change_state failed: " + message)
        return (result, message)
    # set the pulsar name, this should include a check if the pulsar is in the catalog
    self.script.beam_config["lock"].acquire()
    self.script.beam_config["SOURCE"] = source
    self.script.beam_config["lock"].release()
    host = self.script.tcs_host
    port = self.script.tcs_port
    self.script.log (2, "request_target_start: opening socket to " + host + ":" + str(port))
    sock = sockets.openSocket (DL, host, int(port), 1)
    if sock:
        # first send the configuration, then the start command, to TCS
        xml = self.script.get_xml_config()
        self.script.log (2, "request_target_start: get_xml_config=" + str(xml))
        sock.send(xml + "\r\n")
        reply = sock.recv (65536)
        self.script.log (2, "request_target_start: reply=" + str(reply))
        xml = self.script.get_xml_start_cmd()
        self.script.log (2, "request_target_start: get_xml_start_cmd=" + str(xml))
        sock.send(xml + "\r\n")
        reply = sock.recv (65536)
        self.script.log (2, "request_target_start: reply=" + str(reply))
        sock.close()
        return ("ok", "")
    else:
        return ("fail", "could not connect to TCS")
def main (self, id):
    """Main sensor-update loop of the KATCPDaemon.

    Periodically polls the LMC and REPACK daemons over TCP, parses
    their XML status replies and pushes the values into the KATCP
    DeviceServer sensors, until quit_event is set or the KATCP server
    stops running.

    Fixes over the previous revision:
      * ``sock`` is initialised to None before each try block, so the
        exception handlers cannot raise NameError when openSocket
        itself failed;
      * bare ``except:`` clauses narrowed to ``except Exception:`` so
        SystemExit/KeyboardInterrupt still propagate;
      * sockets are closed in every error path (guarded by ``if sock``).
    """
    # connect to the various scripts running to collect the information
    # to be provided by the KATCPServer instance as sensors
    time.sleep(2)
    while not self.quit_event.isSet():
        self.log(3, "KATCPDaemon::main loop start")
        # the primary function of the KATCPDaemon is to update the
        # sensors in the DeviceServer periodically
        # TODO compute overall device status
        self.katcp._device_status.set_value("ok")
        # configure fixed sensors
        self.katcp._beam_sensors["input_channels"].set_value (self.input_nchan)
        # connect to SPIP_LMC to retrieve temperature information
        if self.quit_event.isSet():
            self.log(2, "KATCPDaemon::main quit_event was set, exiting main 1")
            return
        (host, port) = self.lmc.split(":")
        self.log(3, "KATCPDaemon::main updating sensors from LMC")
        sock = None
        try:
            self.log(3, "KATCPDaemon::main openSocket("+host+","+port+")")
            sock = sockets.openSocket (DL, host, int(port), 1)
            self.log(3, "KATCPDaemon::main socket opened")
            if sock:
                sock.settimeout(1.0)
                sock.send(self.lmc_cmd)
                lmc_reply = sock.recv (65536)
                xml = xmltodict.parse(lmc_reply)
                sock.close()
                if self.quit_event.isSet():
                    self.log(2, "KATCPDaemon::main quit_event was set, exiting main 2")
                    return
                self.log(3, "KATCPDaemon::main update_lmc_sensors("+host+",[xml])")
                self.update_lmc_sensors(host, xml)
        except socket.error as e:
            self.log(2, "KATCPDaemon::main socket error on LMC sensor read")
            if e.errno == errno.ECONNRESET:
                self.log(1, "lmc connection was unexpectedly closed")
            if sock:
                sock.close()
        except Exception:
            self.log(2, "KATCPDaemon::main other exception on LMC sensor read")
            if sock:
                sock.close()
        self.log(3, "KATCPDaemon::main received LMC data")
        # connect to SPIP_REPACK to retrieve Pulsar SNR performance
        if self.quit_event.isSet():
            return
        self.log(3, "KATCPDaemon::main pulsar SNR sensor from REPACK")
        (host, port) = self.repack.split(":")
        sock = None
        try:
            sock = sockets.openSocket (DL, host, int(port), 1)
            if sock:
                sock.send (self.repack_cmd)
                repack_reply = sock.recv (65536)
                xml = xmltodict.parse(repack_reply)
                sock.close()
                if self.quit_event.isSet():
                    return
                self.log(3, "KATCPDaemon::main update_repack_sensors("+host+",[xml])")
                self.update_repack_sensors(host, xml)
        except socket.error as e:
            if e.errno == errno.ECONNRESET:
                self.log(1, "repack connection was unexpectedly closed")
            if sock:
                sock.close()
        except Exception:
            self.log(2, "KATCPDaemon::main other exception on repack sensor read")
            if sock:
                sock.close()
        # connect to STAT (TBD) to retrieve beam-former power levels
        host = "None"
        xml = "None"
        self.update_stat_sensors (host, xml)
        self.log(3, "KATCPDaemon::main sleeping for 5 seconds")
        # sleep in 1s slices so a quit request is honoured promptly
        to_sleep = 5
        while not self.quit_event.isSet() and to_sleep > 0:
            to_sleep -= 1
            time.sleep (1)
        # if the KATCP server thread has died, shut the daemon down too
        if not self.katcp.running():
            self.log (-2, "KATCP server was not running, exiting")
            self.quit_event.set()
def issue_start_cmd(self, xml):
    """Issue a START command to the RECV agents of every active beam.

    Walks the obs_cmd XML, builds a per-stream observation header from
    the source / observation / calibration / custom / processing-mode
    parameters, sends it to each active stream's control port, then
    invokes prepare_observation for each active processing mode.

    Fix over the previous revision: in the processing-parameters loop
    a KeyError assigned ``val = ''`` instead of ``pval = ''``, so
    ``stream_config[pkey] = pval`` could raise NameError (first
    iteration) or silently reuse a stale value.
    """
    self.log(
        2, "TCSDaemon::issue_start_cmd nbeam=" +
        xml['obs_cmd']['beam_configuration']['nbeam']['#text'])
    # determine which beams this command corresponds to
    for ibeam in range(
            int(xml['obs_cmd']['beam_configuration']['nbeam']['#text'])):
        state = xml['obs_cmd']['beam_configuration']['beam_state_' +
                                                     str(ibeam)]['#text']
        self.log(2, "TCSDaemon::issue_start_cmd beam state=" + state)
        if state == "1" or state == "on":
            b = xml['obs_cmd']['beam_configuration']['beam_state_' +
                                                     str(ibeam)]['@name']
            self.log(2, "TCSDaemon::issue_start_cmd beam name=" + b)
            if b in self.beam_states.keys():
                self.log(
                    2, "TCSDaemon::issue_start_cmd config=" +
                    str(self.beam_states[b]["config"].keys()))
                obs_config = {}
                self.beam_states[b]["lock"].acquire()
                utc_start = "unset"
                source = "unset"
                # add source parameters
                s = self.beam_states[b]["config"]["source_parameters"]
                for k in s.keys():
                    key = s[k]["@key"]
                    val = s[k]["#text"]
                    obs_config[key] = val
                    self.log(1, key + "=" + val)
                    if key == "SOURCE":
                        source = val
                # add the observation parameters
                o = self.beam_states[b]["config"]["observation_parameters"]
                self.log(1, "TCSDaemon::issue_start_cmd o=" + str(o))
                self.log(
                    1,
                    "TCSDaemon::issue_start_cmd checking value of supplied UTC start: ["
                    + o["utc_start"]["#text"] + "]")
                # if no UTC_START has been specified, set it to +5 seconds
                if o["utc_start"]["#text"] == "None":
                    utc_start = times.getUTCTime(self.start_offset_seconds)
                    o["utc_start"]["#text"] = utc_start
                    self.log(
                        1, "TCSDaemon::issue_start_cmd utc_start=" + utc_start)
                else:
                    self.log(
                        1, "TCSDaemon::issue_start_cmd utc_start already set "
                        + o["utc_start"]["#text"])
                for k in o.keys():
                    key = o[k]["@key"]
                    try:
                        val = o[k]["#text"]
                    except KeyError as e:
                        val = ''
                    obs_config[key] = val
                    self.log(1, key + "=" + val)
                # add the calibration parameters
                o = self.beam_states[b]["config"]["calibration_parameters"]
                for k in o.keys():
                    key = o[k]["@key"]
                    try:
                        val = o[k]["#text"]
                    except KeyError as e:
                        val = ''
                    obs_config[key] = val
                    self.log(1, key + "=" + val)
                    # hack for DSPSR requiring this parameter
                    if key == "CAL_FREQ":
                        obs_config["CALFREQ"] = val
                # extract the stream information
                s = self.beam_states[b]["config"]["stream_configuration"]
                # determine the number of streams present in the configure command
                nstream = s["nstream"]["#text"]
                if int(nstream) != int(self.cfg["NUM_STREAM"]):
                    self.log(
                        1,
                        "TCSDaemon::issue_start_cmd number of streams in config and command did not match"
                    )
                # record which streams are processing which modes
                stream_modes = {}
                # work out which streams correspond to these beams
                for istream in range(int(nstream)):
                    stream_active = False
                    stream_xml = self.beam_states[b]["config"][
                        "stream" + str(istream)]
                    # make a deep copy of the common configuration
                    stream_config = copy.deepcopy(obs_config)
                    # inject custom fields into header
                    custom = stream_xml["custom_parameters"]
                    for k in custom.keys():
                        key = custom[k]["@key"]
                        try:
                            val = custom[k]["#text"]
                        except KeyError as e:
                            val = ''
                        stream_config[key] = val
                        self.log(2, key + "=" + val)
                    modes = stream_xml["processing_modes"]
                    for k in modes.keys():
                        key = modes[k]["@key"]
                        val = modes[k]["#text"]
                        stream_config[key] = val
                        self.log(2, key + "=" + val)
                        # inject processing parameters into header
                        if val == "true" or val == "1":
                            if not (k in stream_modes.keys()):
                                stream_modes[k] = []
                            stream_modes[k].append(istream)
                            stream_active = True
                            self.log(
                                2, "TCSDaemon::issue_start_cmd mode=" + k)
                            p = stream_xml[k + "_processing_parameters"]
                            for l in p.keys():
                                pkey = p[l]["@key"]
                                try:
                                    pval = p[l]["#text"]
                                except KeyError as e:
                                    # FIX: was "val = ''", which left pval
                                    # unbound or stale from a prior iteration
                                    pval = ''
                                stream_config[pkey] = pval
                                self.log(2, pkey + "=" + pval)
                    # ensure the start command is set
                    stream_config["COMMAND"] = "START"
                    stream_config["OBS_OFFSET"] = "0"
                    # convert to a single ascii string
                    obs_header = Config.writeDictToString(stream_config)
                    (host, beam_idx, subband) = self.cfg[
                        "STREAM_" + str(istream)].split(":")
                    beam = self.cfg["BEAM_" + beam_idx]
                    # connect to streams for this beam only
                    if stream_active and beam == b:
                        self.log(
                            2, "TCSDaemon::issue_start_cmd host=" + host +
                            " beam=" + beam + " subband=" + subband)
                        # control port for this recv stream
                        ctrl_port = int(
                            self.cfg["STREAM_CTRL_PORT"]) + istream
                        self.log(2, host + ":" + str(ctrl_port) + " <- start")
                        # connect to recv agent and provide observation configuration
                        self.log(
                            2, "TCSDaemon::issue_start_cmd openSocket(" +
                            host + "," + str(ctrl_port) + ")")
                        recv_sock = sockets.openSocket(
                            DL, host, ctrl_port, 5)
                        if recv_sock:
                            self.log(
                                3,
                                "TCSDaemon::issue_start_cmd sending obs_header length="
                                + str(len(obs_header)))
                            recv_sock.send(obs_header)
                            self.log(
                                3, "TCSDaemon::issue_start_cmd header sent")
                            recv_sock.close()
                            self.log(
                                3, "TCSDaemon::issue_start_cmd socket closed")
                        else:
                            self.log(
                                -2,
                                "TCSDaemon::issue_start_cmd failed to connect to "
                                + host + ":" + str(ctrl_port))
                        # connect to spip_gen and issue start command for UTC
                        # assumes gen host is the same as the recv host!
                        # gen_port = int(self.cfg["STREAM_GEN_PORT"]) + istream
                        # sock = sockets.openSocket (DL, host, gen_port, 1)
                        # if sock:
                        #   sock.send(obs_header)
                        #   sock.close()
                utc_start = self.beam_states[b]["config"][
                    "observation_parameters"]["utc_start"]["#text"]
                # update the dict of observing info for this beam
                self.beam_states[b]["state"] = "Recording"
                self.beam_states[b]["lock"].release()
                # now handle the active streams
                for mode in stream_modes.keys():
                    self.log(
                        1, "TCSDaemon::issue_start_cmd mode=" + mode +
                        " streams=" + str(stream_modes[mode]))
                    self.prepare_observation(beam, utc_start, source, mode,
                                             stream_modes[mode])
def request_configure(self, req, msg):
    """Prepare and configure for the reception of the data_product_id.

    KATCP request handler. With 0 arguments, reports configured
    products; with 1 argument, echoes an existing configuration; with
    5 arguments (id, antennas, n_channels, cbf_source/streams JSON,
    proxy_name) performs a full configuration: matches the sub-array,
    updates the CAM pubsub, derives the per-subband multicast groups
    and pushes the configuration to every RECV stream of this beam.

    Fixes over the previous revision: corrected the garbled sub-array
    failure message; removed a stray semicolon; ``is not None`` idiom
    not applicable here (no change to runtime comparisons).
    """
    self.script.log (1, "request_configure: nargs= " + str(len(msg.arguments)) + " msg=" + str(msg))
    if len(msg.arguments) == 0:
        self.script.log (-1, "request_configure: no arguments provided")
        return ("ok", "configured data products: TBD")
    # the sub-array identifier
    data_product_id = msg.arguments[0]
    if len(msg.arguments) == 1:
        # query-only form: report the existing configuration, if any
        self.script.log (1, "request_configure: request for configuration of " + str(data_product_id))
        if data_product_id == self._data_product["id"]:
            configuration = str(data_product_id) + " " + \
                str(self._data_product['antennas']) + " " + \
                str(self._data_product['n_channels']) + " " + \
                str(self._data_product['cbf_source']) + " " + \
                str(self._data_product['proxy_name'])
            self.script.log (1, "request_configure: configuration of " + str(data_product_id) + "=" + configuration)
            return ("ok", configuration)
        else:
            self.script.log (-1, "request_configure: no configuration existed for " + str(data_product_id))
            return ("fail", "no configuration existed for " + str(data_product_id))
    if len(msg.arguments) == 5:
        # if the configuration for the specified data product matches exactly the
        # previous specification for that data product, then no action is required
        self.script.log (1, "configure: configuring " + str(data_product_id))
        if data_product_id == self._data_product["id"] and \
                self._data_product['antennas'] == msg.arguments[1] and \
                self._data_product['n_channels'] == msg.arguments[2] and \
                self._data_product['cbf_source'] == str(msg.arguments[3]) and \
                self._data_product['proxy_name'] == str(msg.arguments[4]):
            response = "configuration for " + str(data_product_id) + " matched previous"
            self.script.log (1, "configure: " + response)
            return ("ok", response)
        # the data product requires configuration
        else:
            self.script.log (1, "configure: new data product " + data_product_id)
            # TODO decide what to do regarding preconfigured params (e.g. FREQ, BW) vs CAM supplied values
            # determine which sub-array we are matched against
            the_sub_array = -1
            for i in range(4):
                self.script.log (1, "configure: testing self.data_product_res[" + str(i) +"].match(" + data_product_id +")")
                if self.data_product_res[i].match (data_product_id):
                    the_sub_array = i + 1
            if the_sub_array == -1:
                self.script.log (1, "configure: could not match subarray from " + data_product_id)
                # FIX: previous message was garbled ("could not data product to sub array")
                return ("fail", "could not match data product to sub array")
            antennas = msg.arguments[1]
            n_channels = msg.arguments[2]
            cbf_source = str(msg.arguments[3])
            # argument 3 doubles as the JSON streams description
            streams = json.loads (msg.arguments[3])
            proxy_name = str(msg.arguments[4])
            self.script.log (2, "configure: streams="+str(streams))
            # check if the number of existing + new beams > available
            # (cfreq, bwd, nchan1) = self.script.cfg["SUBBAND_CONFIG_0"].split(":")
            # (cfreq, bwd, nchan2) = self.script.cfg["SUBBAND_CONFIG_1"].split(":")
            # nchan = int(nchan1) + int(nchan2)
            #if nchan != int(n_channels):
            #  self._data_product.pop(data_product_id, None)
            #  response = "PTUSE configured for " + str(nchan) + " channels"
            #  self.script.log (-1, "configure: " + response)
            #  return ("fail", response)
            self._data_product['id'] = data_product_id
            self._data_product['antennas'] = antennas
            self._data_product['n_channels'] = n_channels
            self._data_product['cbf_source'] = cbf_source
            self._data_product['streams'] = str(streams)
            self._data_product['proxy_name'] = proxy_name
            self._data_product['state'] = "unconfigured"
            # change the state
            (result, message) = self.change_state ("configure")
            if result != "ok":
                self.script.log (-1, "configure: change_state failed: " + message)
                return (result, message)
            # determine the CAM metadata server and update pubsub
            cam_server = "None"
            fengine_stream = "None"
            polh_stream = "None"
            polv_stream = "None"
            self.script.log (2, "configure: streams.keys()=" + str(streams.keys()))
            self.script.log (2, "configure: streams['cam.http'].keys()=" + str(streams['cam.http'].keys()))
            if 'cam.http' in streams.keys() and 'camdata' in streams['cam.http'].keys():
                cam_server = streams['cam.http']['camdata']
                self.script.log (2,"configure: cam_server="+str(cam_server))
            if 'cbf.antenna_channelised_voltage' in streams.keys():
                stream_name = streams['cbf.antenna_channelised_voltage'].keys()[0]
                fengine_stream = stream_name.split(".")[0]
                self.script.log (2,"configure: fengine_stream="+str(fengine_stream))
            if 'cbf.tied_array_channelised_voltage' in streams.keys():
                # pol H streams end in 'x', pol V streams end in 'y'
                for s in streams['cbf.tied_array_channelised_voltage'].keys():
                    if s.endswith('y'):
                        polv_stream = s
                    if s.endswith('x'):
                        polh_stream = s
                self.script.log (2,"configure: polh_stream="+str(polh_stream) + " polv_stream=" + str(polv_stream))
            if cam_server != "None" and fengine_stream != "None" and polh_stream != "None":
                self.script.pubsub.update_cam (cam_server, fengine_stream, polh_stream, polv_stream, antennas)
            else:
                response = "Could not extract streams[cam.http][camdata]"
                self.script.log (1, "configure: cam_server=" + cam_server)
                self.script.log (1, "configure: fengine_stream=" + fengine_stream)
                self.script.log (1, "configure: polh_stream=" + polh_stream)
                self.script.log (-1, "configure: " + response)
                return ("fail", response)
            # restart the pubsub service
            self.script.log (1, "configure: restarting pubsub for new meta-data")
            self.script.pubsub.restart()
            # determine the X and Y tied array channelised voltage streams
            mcasts = {}
            ports = {}
            key = 'cbf.tied_array_channelised_voltage'
            if key in streams.keys():
                stream = 'i0.tied-array-channelised-voltage.0x'
                if stream in streams[key].keys():
                    (mcast, port) = self.parseStreamAddress (streams[key][stream])
                    mcasts['x'] = mcast
                    ports['x'] = int(port)
                else:
                    response = "Could not extract streams["+key+"]["+stream+"]"
                    self.script.log (-1, "configure: " + response)
                    return ("fail", response)
                stream = 'i0.tied-array-channelised-voltage.0y'
                if stream in streams[key].keys():
                    (mcast, port) = self.parseStreamAddress (streams[key][stream])
                    mcasts['y'] = mcast
                    ports['y'] = int(port)
                else:
                    response = "Could not extract streams["+key+"]["+stream+"]"
                    self.script.log (-1, "configure: " + response)
                    return ("fail", response)
            (mcast_base_x, mcast_ngroups_x) = mcasts['x'].split("+")
            (mcast_base_y, mcast_ngroups_y) = mcasts['y'].split("+")
            # NOTE(review): integer division under Python 2 semantics;
            # assumes n_channels divides evenly by the mcast group count
            nchan_per_group = int(n_channels) / int(mcast_ngroups_x)
            self.script.log (1, "configure: connecting to RECV instance to update configuration")
            for istream in range(int(self.script.cfg["NUM_STREAM"])):
                (host, beam_idx, subband) = self.script.cfg["STREAM_" + str(istream)].split(":")
                beam = self.script.cfg["BEAM_" + beam_idx]
                self.script.log (1, "configure: istream="+str(istream)+ " beam=" + beam + " script.beam_name=" + self.script.beam_name)
                (cfreq, bw, nchan) = self.script.cfg["SUBBAND_CONFIG_" + str(istream)].split(":")
                (start_chan, end_chan) = self.script.cfg["SUBBAND_CHANS_" + subband].split(":")
                # derive this subband's multicast group range from the base address
                parts_x = mcast_base_x.split(".")
                parts_y = mcast_base_y.split(".")
                last_quartet = int(math.floor(int(start_chan) / nchan_per_group))
                parts_x[3] = str(last_quartet)
                parts_y[3] = str(last_quartet)
                ngroups = int(math.floor(int(nchan)) / nchan_per_group) - 1
                mcast_x = ".".join(parts_x) + "+" + str(ngroups)
                mcast_y = ".".join(parts_y) + "+" + str(ngroups)
                self.script.log (1, "configure: istream=" + str(istream) + " mcast_x=" + mcast_x + " mcast_y=" + mcast_y)
                if beam == self.script.beam_name:
                    # reset ADC_SYNC_TIME on the beam
                    self.script.beam_config["lock"].acquire()
                    self.script.beam_config["ADC_SYNC_TIME"] = "0"
                    self.script.beam_config["lock"].release()
                    port = int(self.script.cfg["STREAM_RECV_PORT"]) + istream
                    self.script.log (1, "configure: connecting to " + host + ":" + str(port))
                    sock = sockets.openSocket (DL, host, port, 1)
                    if sock:
                        req = "<?req version='1.0' encoding='ISO-8859-1'?>"
                        req += "<recv_cmd>"
                        req += "<command>configure</command>"
                        req += "<params>"
                        req += "<param key='DATA_MCAST_0'>" + mcast_x + "</param>"
                        req += "<param key='DATA_PORT_0'>" + str(ports['x']) + "</param>"
                        req += "<param key='META_MCAST_0'>" + mcast_x + "</param>"
                        req += "<param key='META_PORT_0'>" + str(ports['x']) + "</param>"
                        req += "<param key='DATA_MCAST_1'>" + mcast_y + "</param>"
                        req += "<param key='DATA_PORT_1'>" + str(ports['y']) + "</param>"
                        req += "<param key='META_MCAST_1'>" + mcast_y + "</param>"
                        req += "<param key='META_PORT_1'>" + str(ports['y']) + "</param>"
                        req += "</params>"
                        req += "</recv_cmd>"
                        self.script.log (1, "configure: sending XML req ["+req+"]")
                        sock.send(req)
                        self.script.log (1, "configure: send XML, receiving reply")
                        recv_reply = sock.recv (65536)
                        self.script.log (1, "configure: received " + recv_reply)
                        sock.close()
                    else:
                        response = "configure: could not connect to stream " + str(istream) + " at " + host + ":" + str(port)
                        self.script.log (-1, "configure: " + response)
                        return ("fail", response)
            return ("ok", "data product " + str (data_product_id) + " configured")
    else:
        response = "expected 0, 1 or 5 arguments, received " + str(len(msg.arguments))
        self.script.log (-1, "configure: " + response)
        return ("fail", response)
def issue_stop_cmd(self, xml):
    """Issue a STOP command to every RECV agent serving an active beam.

    For each beam flagged on in the obs_cmd XML, marks the beam as
    Stopping, fills in UTC_STOP (defaulting to "now"), sends the STOP
    header to each matching stream's control port, then marks the
    beam Idle.
    """
    self.log(2, "issue_stop_cmd()")
    beam_cfg = xml['obs_cmd']['beam_configuration']
    # determine which beams this command corresponds to
    for ibeam in range(int(beam_cfg['nbeam']['#text'])):
        beam_state = beam_cfg['beam_state_' + str(ibeam)]
        if beam_state['#text'] not in ("1", "on"):
            continue
        b = beam_state['@name']
        if b not in self.beam_states.keys():
            continue
        self.log(1, "issue_stop_cmd: beam=" + b)
        obs = {"COMMAND": "STOP"}
        # update beam state and UTC_STOP under the beam lock
        self.beam_states[b]["lock"].acquire()
        self.beam_states[b]["state"] = "Stopping"
        # inject the observation parameters
        obs_params = self.beam_states[b]["config"]["observation_parameters"]
        # if no UTC_STOP has been specified, set it to now
        if obs_params["utc_stop"]["#text"] == "None":
            obs_params["utc_stop"]["#text"] = times.getUTCTime()
        obs["UTC_STOP"] = obs_params["utc_stop"]["#text"]
        self.beam_states[b]["lock"].release()
        # convert to a single ascii string
        obs_header = Config.writeDictToString(obs)
        # work out which streams correspond to these beams
        for istream in range(int(self.cfg["NUM_STREAM"])):
            (host, beam_idx,
             subband) = self.cfg["STREAM_" + str(istream)].split(":")
            beam = self.cfg["BEAM_" + beam_idx]
            self.log(2, "issue_stop_cmd: host=" + host + " beam=" + beam +
                     " subband=" + subband)
            # connect to streams for this beam only
            if beam != b:
                continue
            # control port the this recv stream
            ctrl_port = int(self.cfg["STREAM_CTRL_PORT"]) + istream
            # connect to recv agent and provide observation configuration
            self.log(3, "issue_stop_cmd: openSocket(" + host + "," +
                     str(ctrl_port) + ")")
            sock = sockets.openSocket(DL, host, ctrl_port, 1)
            if sock:
                self.log(3, "issue_stop_cmd: sending obs_header len=" +
                         str(len(obs_header)))
                sock.send(obs_header)
                self.log(3, "issue_stop_cmd: command sent")
                sock.close()
                self.log(3, "issue_stop_cmd: socket closed")
            # connect to spip_gen and issue stop command for UTC
            # assumes gen host is the same as the recv host!
            # gen_port = int(self.cfg["STREAM_GEN_PORT"]) + istream
            # sock = sockets.openSocket (DL, host, gen_port, 1)
            # if sock:
            #   sock.send(obs_header)
            #   sock.close()
        # update the dict of observing info for this beam
        self.beam_states[b]["lock"].acquire()
        self.beam_states[b]["state"] = "Idle"
        self.beam_states[b]["lock"].release()
def request_target_start(self, req, target_name):
    """Commence data processing using target.

    Copies CAM-supplied metadata into the beam configuration, checks
    the target against the pulsar catalog and the ADC sync time, then
    sends the configure and start XML commands to TCS.
    """
    self.script.log(1, "request_target_start(" + target_name + ")")
    if self._data_product["id"] == "None":
        return ("fail", "data product was not configured")
    cam = self.script.cam_config
    beam = self.script.beam_config
    self.script.log(
        1, "request_target_start ADC_SYNC_TIME=" + cam["ADC_SYNC_TIME"])
    # copy the CAM-supplied metadata into the beam configuration under lock
    beam["lock"].acquire()
    beam["TARGET"] = cam["TARGET"]
    # only adopt the CAM ADC_SYNC_TIME when it carries a real value
    if cam["ADC_SYNC_TIME"] != "0":
        beam["ADC_SYNC_TIME"] = cam["ADC_SYNC_TIME"]
    for cfg_key in ("NCHAN_PER_STREAM",
                    "PRECISETIME_FRACTION_POLV",
                    "PRECISETIME_FRACTION_POLH",
                    "PRECISETIME_UNCERTAINTY_POLV",
                    "PRECISETIME_UNCERTAINTY_POLH",
                    "TFR_KTT_GNSS",
                    "ITRF",
                    "OBSERVER",
                    "ANTENNAE",
                    "SCHEDULE_BLOCK_ID",
                    "PROPOSAL_ID",
                    "EXPERIMENT_ID",
                    "DESCRIPTION"):
        beam[cfg_key] = cam[cfg_key]
    beam["lock"].release()
    # check the pulsar specified is listed in the catalog
    (result, message) = self.test_pulsar_valid(target_name)
    if result != "ok":
        return (result, message)
    # check the ADC_SYNC_TIME is valid for this beam
    if beam["ADC_SYNC_TIME"] == "0":
        return ("fail", "ADC Synchronisation Time was not valid")
    # change the state
    (result, message) = self.change_state("target_start")
    if result != "ok":
        self.script.log(-1, "target_start: change_state failed: " + message)
        return (result, message)
    # set the pulsar name, this should include a check if the pulsar is
    # in the catalog
    beam["lock"].acquire()
    beam["SOURCE"] = target_name
    beam["lock"].release()
    host = self.script.tcs_host
    port = self.script.tcs_port
    self.script.log(
        2,
        "request_target_start: opening socket to " + host + ":" + str(port))
    sock = sockets.openSocket(DL, host, int(port), 1)
    if not sock:
        return ("fail", "could not connect to TCS")
    # send the configuration, then the start command, to TCS
    xml = self.script.get_xml_config()
    self.script.log(2, "request_target_start: get_xml_config=" + str(xml))
    sock.send(xml + "\r\n")
    reply = sock.recv(65536)
    self.script.log(2, "request_target_start: reply=" + str(reply))
    xml = self.script.get_xml_start_cmd()
    self.script.log(
        2, "request_target_start: get_xml_start_cmd=" + str(xml))
    sock.send(xml + "\r\n")
    reply = sock.recv(65536)
    self.script.log(2, "request_target_start: reply=" + str(reply))
    sock.close()
    return ("ok", "")
def setup_sensors_host (self, host, port):
    """Create and register KATCP sensors for one server host.

    Queries the host's LMC daemon for its sensor metrics, then creates
    disk, load, NTP-sync, temperature, fan and power-supply sensors
    keyed by metric name in self._host_sensors.

    Fixes over the previous revision:
      * removed the duplicated add_sensor() registration of num_cores;
      * num_cores description/unit were a copy-paste of the disk
        sensor ("disk available space" / "MB").
    """
    self.script.log(1, "KATCPServer::setup_sensors_host ("+host+","+port+")")
    sock = sockets.openSocket (DL, host, int(port), 1)
    if sock:
        self.script.log(2, "KATCPServer::setup_sensors_host sock.send(" + self.script.lmc_cmd + ")")
        sock.send (self.script.lmc_cmd + "\r\n")
        lmc_reply = sock.recv (65536)
        sock.close()
        xml = xmltodict.parse(lmc_reply)
        self.script.log(2, "KATCPServer::setup_sensors_host sock.recv=" + str(xml))
        self._host_sensors = {}
        # Disk sensors
        self.script.log(2, "KATCPServer::setup_sensors_host configuring disk sensors")
        disk_prefix = host+".disk"
        self._host_sensors["disk_size"] = Sensor.float(disk_prefix+".size",
                                                      description=host+": disk size",
                                                      unit="MB",
                                                      params=[8192,1e9],
                                                      default=0)
        self._host_sensors["disk_available"] = Sensor.float(disk_prefix+".available",
                                                            description=host+": disk available space",
                                                            unit="MB",
                                                            params=[1024,1e9],
                                                            default=0)
        self.add_sensor(self._host_sensors["disk_size"])
        self.add_sensor(self._host_sensors["disk_available"])
        # Server Load sensors
        self.script.log(2, "KATCPServer::setup_sensors_host configuring load sensors")
        # FIX: description/unit were copy-pasted from the disk sensor
        self._host_sensors["num_cores"] = Sensor.integer (host+".num_cores",
                                                          description=host+": number of CPU cores",
                                                          unit="",
                                                          params=[1,64],
                                                          default=0)
        self._host_sensors["load1"] = Sensor.float(host+".load.1min",
                                                   description=host+": 1 minute load ",
                                                   unit="",
                                                   default=0)
        self._host_sensors["load5"] = Sensor.float(host+".load.5min",
                                                   description=host+": 5 minute load ",
                                                   unit="",
                                                   default=0)
        self._host_sensors["load15"] = Sensor.float(host+".load.15min",
                                                    description=host+": 15 minute load ",
                                                    unit="",
                                                    default=0)
        # NOTE(review): this sensor name lacks the host prefix the other
        # sensors carry — multiple hosts would collide; confirm intent
        self._host_sensors["local_time_synced"] = Sensor.boolean("local_time_synced",
                                                                 description=host+": NTP server synchronisation",
                                                                 unit="",
                                                                 default=0)
        # FIX: num_cores was previously registered twice
        self.add_sensor(self._host_sensors["num_cores"])
        self.add_sensor(self._host_sensors["load1"])
        self.add_sensor(self._host_sensors["load5"])
        self.add_sensor(self._host_sensors["load15"])
        self.add_sensor(self._host_sensors["local_time_synced"])
        cpu_temp_pattern = re.compile("cpu[0-9]+_temp")
        fan_speed_pattern = re.compile("fan[0-9,a-z]+")
        power_supply_pattern = re.compile("ps[0-9]+_status")
        self.script.log(2, "KATCPServer::setup_sensors_host configuring other metrics")
        if not xml["lmc_reply"]["sensors"] == None:
            # NOTE(review): assumes xmltodict yields a list of metrics;
            # a single <metric> element would parse as a dict — confirm
            for sensor in xml["lmc_reply"]["sensors"]["metric"]:
                name = sensor["@name"]
                if name == "system_temp":
                    self._host_sensors[name] = Sensor.float((host+".system_temp"),
                                                            description=host+": system temperature",
                                                            unit="C",
                                                            params=[-20,150],
                                                            default=0)
                    self.add_sensor(self._host_sensors[name])
                if cpu_temp_pattern.match(name):
                    (cpu, junk) = name.split("_")
                    self._host_sensors[name] = Sensor.float((host+"." + name),
                                                            description=host+": "+ cpu +" temperature",
                                                            unit="C",
                                                            params=[-20,150],
                                                            default=0)
                    self.add_sensor(self._host_sensors[name])
                if fan_speed_pattern.match(name):
                    self._host_sensors[name] = Sensor.float((host+"." + name),
                                                            description=host+": "+name+" speed",
                                                            unit="RPM",
                                                            params=[0,20000],
                                                            default=0)
                    self.add_sensor(self._host_sensors[name])
                if power_supply_pattern.match(name):
                    self._host_sensors[name] = Sensor.boolean((host+"." + name),
                                                              description=host+": "+name,
                                                              unit="",
                                                              default=0)
                    self.add_sensor(self._host_sensors[name])
            # TODO consider adding power supply sensors: e.g.
            #   device-status-kronos1-powersupply1
            #   device-status-kronos1-powersupply2
            #   device-status-kronos2-powersupply1
            #   device-status-kronos2-powersupply2
            # TODO consider adding raid/disk sensors: e.g.
            #   device-status-<host>-raid
            #   device-status-<host>-raid-disk1
            #   device-status-<host>-raid-disk2
            self.script.log(2, "KATCPServer::setup_sensors_host done!")
        else:
            self.script.log(2, "KATCPServer::setup_sensors_host no sensors found")
    else:
        self.script.log(-2, "KATCPServer::setup_sensors_host: could not connect to LMC")
def issue_start_cmd(self, xml):
    """Issue a START command to every active stream of each enabled beam.

    For each beam flagged on in the obs_cmd XML, flattens the beam's stored
    configuration (source, observation, calibration, per-stream custom and
    processing parameters) into a header dict, sends it to the control port
    of every matching, active recv stream, marks the beam "Recording" and
    then calls prepare_observation() once per active processing mode.

    Fix: a KeyError while reading a processing parameter previously assigned
    the fallback to `val` instead of `pval`, so `pval` could be unset (NameError)
    or carry a stale value into stream_config.
    """
    self.log(2, "TCSDaemon::issue_start_cmd nbeam=" +
             xml['obs_cmd']['beam_configuration']['nbeam']['#text'])

    # determine which beams this command corresponds to
    for ibeam in range(int(xml['obs_cmd']['beam_configuration']['nbeam']['#text'])):
        state = xml['obs_cmd']['beam_configuration']['beam_state_' + str(ibeam)]['#text']
        self.log(2, "TCSDaemon::issue_start_cmd beam state=" + state)
        if state == "1" or state == "on":
            b = xml['obs_cmd']['beam_configuration']['beam_state_' + str(ibeam)]['@name']
            self.log(2, "TCSDaemon::issue_start_cmd beam name=" + b)
            if b in self.beam_states.keys():
                self.log(2, "TCSDaemon::issue_start_cmd config=" +
                         str(self.beam_states[b]["config"].keys()))
                obs_config = {}

                # hold the beam lock while reading/mutating its config
                self.beam_states[b]["lock"].acquire()

                utc_start = "unset"
                source = "unset"

                # add source parameters
                s = self.beam_states[b]["config"]["source_parameters"]
                for k in s.keys():
                    key = s[k]["@key"]
                    val = s[k]["#text"]
                    obs_config[key] = val
                    self.log(1, key + "=" + val)
                    if key == "SOURCE":
                        source = val

                # add the observation parameters
                o = self.beam_states[b]["config"]["observation_parameters"]
                self.log(1, "TCSDaemon::issue_start_cmd o=" + str(o))
                self.log(1, "TCSDaemon::issue_start_cmd checking value of supplied "
                            "UTC start: [" + o["utc_start"]["#text"] + "]")

                # if no UTC_START has been specified, set it to +start_offset_seconds
                if o["utc_start"]["#text"] == "None":
                    utc_start = times.getUTCTime(self.start_offset_seconds)
                    o["utc_start"]["#text"] = utc_start
                    self.log(1, "TCSDaemon::issue_start_cmd utc_start=" + utc_start)
                else:
                    self.log(1, "TCSDaemon::issue_start_cmd utc_start already set " +
                             o["utc_start"]["#text"])

                for k in o.keys():
                    key = o[k]["@key"]
                    try:
                        val = o[k]["#text"]
                    except KeyError:
                        val = ''
                    obs_config[key] = val
                    self.log(1, key + "=" + val)

                # add the calibration parameters
                o = self.beam_states[b]["config"]["calibration_parameters"]
                for k in o.keys():
                    key = o[k]["@key"]
                    try:
                        val = o[k]["#text"]
                    except KeyError:
                        val = ''
                    obs_config[key] = val
                    self.log(1, key + "=" + val)
                    # hack for DSPSR requiring this parameter
                    if key == "CAL_FREQ":
                        obs_config["CALFREQ"] = val

                # extract the stream information
                s = self.beam_states[b]["config"]["stream_configuration"]

                # determine the number of streams present in the configure command
                nstream = s["nstream"]["#text"]
                if int(nstream) != int(self.cfg["NUM_STREAM"]):
                    self.log(1, "TCSDaemon::issue_start_cmd number of streams in "
                                "config and command did not match")

                # record which streams are processing which modes
                stream_modes = {}

                # work out which streams correspond to these beams
                for istream in range(int(nstream)):
                    stream_active = False
                    stream_xml = self.beam_states[b]["config"]["stream" + str(istream)]

                    # make a deep copy of the common configuration
                    stream_config = copy.deepcopy(obs_config)

                    # inject custom fields into header
                    custom = stream_xml["custom_parameters"]
                    for k in custom.keys():
                        key = custom[k]["@key"]
                        try:
                            val = custom[k]["#text"]
                        except KeyError:
                            val = ''
                        stream_config[key] = val
                        self.log(2, key + "=" + val)

                    modes = stream_xml["processing_modes"]
                    for k in modes.keys():
                        key = modes[k]["@key"]
                        val = modes[k]["#text"]
                        stream_config[key] = val
                        self.log(2, key + "=" + val)

                        # inject processing parameters into header
                        if val == "true" or val == "1":
                            if not (k in stream_modes.keys()):
                                stream_modes[k] = []
                            stream_modes[k].append(istream)
                            stream_active = True
                            self.log(2, "TCSDaemon::issue_start_cmd mode=" + k)
                            p = stream_xml[k + "_processing_parameters"]
                            for l in p.keys():
                                pkey = p[l]["@key"]
                                try:
                                    pval = p[l]["#text"]
                                except KeyError:
                                    # fixed: previously assigned val = '' here,
                                    # leaving pval unset or stale
                                    pval = ''
                                stream_config[pkey] = pval
                                self.log(2, pkey + "=" + pval)

                    # ensure the start command is set
                    stream_config["COMMAND"] = "START"
                    stream_config["OBS_OFFSET"] = "0"

                    # convert to a single ascii string
                    obs_header = Config.writeDictToString(stream_config)

                    (host, beam_idx, subband) = self.cfg["STREAM_" + str(istream)].split(":")
                    beam = self.cfg["BEAM_" + beam_idx]

                    # connect to streams for this beam only
                    if stream_active and beam == b:
                        self.log(2, "TCSDaemon::issue_start_cmd host=" + host +
                                 " beam=" + beam + " subband=" + subband)

                        # control port for this recv stream
                        ctrl_port = int(self.cfg["STREAM_CTRL_PORT"]) + istream
                        self.log(2, host + ":" + str(ctrl_port) + " <- start")

                        # connect to recv agent and provide observation configuration
                        self.log(2, "TCSDaemon::issue_start_cmd openSocket(" + host +
                                 "," + str(ctrl_port) + ")")
                        recv_sock = sockets.openSocket(DL, host, ctrl_port, 5)
                        if recv_sock:
                            self.log(3, "TCSDaemon::issue_start_cmd sending obs_header "
                                        "length=" + str(len(obs_header)))
                            recv_sock.send(obs_header)
                            self.log(3, "TCSDaemon::issue_start_cmd header sent")
                            recv_sock.close()
                            self.log(3, "TCSDaemon::issue_start_cmd socket closed")
                        else:
                            self.log(-2, "TCSDaemon::issue_start_cmd failed to connect to " +
                                     host + ":" + str(ctrl_port))

                        # connect to spip_gen and issue start command for UTC
                        # assumes gen host is the same as the recv host!
                        # gen_port = int(self.cfg["STREAM_GEN_PORT"]) + istream
                        # sock = sockets.openSocket (DL, host, gen_port, 1)
                        # if sock:
                        #   sock.send(obs_header)
                        #   sock.close()

                utc_start = self.beam_states[b]["config"]["observation_parameters"]["utc_start"]["#text"]

                # update the dict of observing info for this beam
                self.beam_states[b]["state"] = "Recording"
                self.beam_states[b]["lock"].release()

                # now handle the active streams
                for mode in stream_modes.keys():
                    self.log(1, "TCSDaemon::issue_start_cmd mode=" + mode +
                             " streams=" + str(stream_modes[mode]))
                    # NOTE(review): `beam` here is the leftover from the last
                    # stream iteration; presumably it should always equal `b`
                    # when streams for this beam exist — confirm before changing.
                    self.prepare_observation(beam, utc_start, source, mode,
                                             stream_modes[mode])
def issue_stop_cmd (self, xml): self.log(2, "issue_stop_cmd()") # determine which beams this command corresponds to for ibeam in range(int(xml['obs_cmd']['beam_configuration']['nbeam']['#text'])): state = xml['obs_cmd']['beam_configuration']['beam_state_' + str(ibeam)]['#text'] if state == "1" or state == "on": b = xml['obs_cmd']['beam_configuration']['beam_state_' + str(ibeam)]['@name'] if b in self.beam_states.keys(): self.log(1, "issue_stop_cmd: beam=" + b) obs = {} self.beam_states[b]["lock"].acquire() self.beam_states[b]["state"] = "Stopping" obs["COMMAND"] = "STOP" # inject the observation parameters o = self.beam_states[b]["config"]["observation_parameters"] # if no UTC_STOP has been specified, set it to now if o["utc_stop"]["#text"] == "None": o["utc_stop"]["#text"] = times.getUTCTime() obs["UTC_STOP"] = o["utc_stop"]["#text"] self.beam_states[b]["lock"].release() # convert to a single ascii string obs_header = Config.writeDictToString (obs) # work out which streams correspond to these beams for istream in range(int(self.cfg["NUM_STREAM"])): (host, beam_idx, subband) = self.cfg["STREAM_"+str(istream)].split(":") beam = self.cfg["BEAM_" + beam_idx] self.log(2, "issue_stop_cmd: host="+host+" beam="+beam+" subband="+subband) # connect to streams for this beam only if beam == b: # control port the this recv stream ctrl_port = int(self.cfg["STREAM_CTRL_PORT"]) + istream # connect to recv agent and provide observation configuration self.log(3, "issue_stop_cmd: openSocket("+host+","+str(ctrl_port)+")") sock = sockets.openSocket (DL, host, ctrl_port, 1) if sock: self.log(3, "issue_stop_cmd: sending obs_header len=" + str(len(obs_header))) sock.send(obs_header) self.log(3, "issue_stop_cmd: command sent") sock.close() self.log(3, "issue_stop_cmd: socket closed") # connect to spip_gen and issue stop command for UTC # assumes gen host is the same as the recv host! 
# gen_port = int(self.cfg["STREAM_GEN_PORT"]) + istream # sock = sockets.openSocket (DL, host, gen_port, 1) # if sock: # sock.send(obs_header) # sock.close() # update the dict of observing info for this beam self.beam_states[b]["lock"].acquire() self.beam_states[b]["state"] = "Idle" self.beam_states[b]["lock"].release()
def issue_start_cmd(self, line):
    """Validate CAL parameters, then send configure + start to the TCS service.

    If the calibration signal is enabled and the CAL frequency is a positive
    integer, fills in default duty_cycle/phase/tsys parameters and a fake CAL
    epoch; otherwise disables the CAL signal. Then unparses the beam config to
    XML and issues a "configure" followed by a "start" command to spip_tcs.

    Returns the TCS error response on failure, or "start_utc <utc>" on success.

    Fixes over previous revision:
    - float() conversion of the freq parameter is now inside the try block, so
      a non-numeric value no longer raises an uncaught ValueError.
    - the bare `except:` is narrowed to the conversion errors it guards.
    """
    self.log(2, "issue_start_cmd()")

    # check calibration parameters to see if we can enable the CAL processing
    if self.get_param("calibration_parameters", "signal") == "1":
        # read the cal freq; validate it is a positive integer value
        valid_cal = False
        try:
            calfreq_str = float(
                self.get_param("calibration_parameters", "freq"))
            self.info("calibration_parmaeters::freq=" + str(calfreq_str))
            calfreq_int = int(calfreq_str)
            if calfreq_int > 0 and calfreq_str == float(calfreq_int):
                valid_cal = True
                self.info("calibration_parmaeters::freq [" + str(calfreq_str) +
                          "] was an integer as [" + str(calfreq_int) + "]")
            else:
                self.info("calibration_parmaeters::freq [" + str(calfreq_str) +
                          "] was not > 0")
        except (TypeError, ValueError):
            self.info("calibration_parmaeters::freq [" +
                      str(self.get_param("calibration_parameters", "freq")) +
                      "] was not an integer")

        # if the calfreq is a positive integer, assume 0.5 duty cycle and 0.5 phase
        if valid_cal:
            self.set_param("calibration_parameters", "duty_cycle", "0.5")
            self.set_param("calibration_parameters", "phase", "0.5")
            self.set_param("calibration_parameters", "tsys_avg_time", "10")
            self.set_param("calibration_parameters", "tsys_freq_resolution", "1")
            # generate a fake epoch for the CAL
            fake_cal_epoch = times.getUTCTime()
            self.set_param("calibration_parameters", "epoch", fake_cal_epoch)
        else:
            self.set_param("calibration_parameters", "signal", "0")
            self.set_param("calibration_parameters", "epoch", "Unknown")

    # first issue a configure command to spip_tcs
    script.beam_cfg["obs_cmd"]["command"] = "configure"
    xml = xmltodict.unparse(script.beam_cfg)
    self.log(2, "issue_start_cmd xml=" + str(xml))
    sock = sockets.openSocket(DL, self.host, self.spip_tcs_port, 1)
    if sock:
        self.log(1, "UWB_TCS <- configure")
        sock.send(xml + "\r\n")
        xml_reply = sock.recv(131072)
        sock.close()
        reply = xmltodict.parse(xml_reply)
        self.log(1, "UWB_TCS -> " + reply["tcs_response"])
        if reply["tcs_response"] != "OK":
            self.log(-1, "TCSInterfaceDaemon::issue_start_cmd: bad configuration: " +
                     reply["tcs_response"])
            return reply["tcs_response"]
    else:
        self.log(-1,
                 "TCSInterfaceDaemon::issue_start_cmd could not connect to uwb_tcs")
        return "FAIL: could not connect to Medusa's TCS service"

    # the configure command did work, start!
    script.beam_cfg["obs_cmd"]["command"] = "start"
    utc_start = times.getUTCTime(10)
    self.set_param("observation_parameters", "utc_start", utc_start)

    # convert dict into XML to send to spip_tcs
    xml = xmltodict.unparse(script.beam_cfg)
    sock = sockets.openSocket(DL, self.host, self.spip_tcs_port, 1)
    if sock:
        self.log(1, "UWB_TCS <- start")
        sock.send(xml + "\r\n")
        xml_reply = sock.recv(131072)
        sock.close()
        reply = xmltodict.parse(xml_reply)
        self.log(1, "UWB_TCS -> " + reply["tcs_response"])
        if reply["tcs_response"] != "OK":
            self.log(-1, "TCSInterfaceDaemon::issue_start_cmd: bad configuration: " +
                     reply["tcs_response"])
            return reply["tcs_response"]
        else:
            return "start_utc " + utc_start
    else:
        # log at error level, consistent with the configure-phase failure above
        self.log(-1,
                 "TCSInterfaceDaemon::issue_start_cmd could not connect to spip_tcs")
        return "internal medusa error"
def main(self, id):
    """Main polling loop: periodically refresh KATCP sensor values.

    Every ~5 seconds (until quit_event is set) connects to the SPIP_LMC and
    SPIP_REPACK services, parses their XML replies, and pushes the values
    into the KATCP DeviceServer's sensors. Exits when the KATCP server stops.

    Note: `id` shadows the builtin; it is part of the existing daemon
    interface so it is kept unchanged.
    """
    # connect to the various scripts running to collect the information
    # to be provided by the KATCPServer instance as sensors
    time.sleep(2)

    while not self.quit_event.isSet():
        # the primary function of the KATCPDaemon is to update the
        # sensors in the DeviceServer periodically
        # TODO compute overall device status
        self.katcp._device_status.set_value("ok")

        # connect to SPIP_LMC to retreive temperature information
        if self.quit_event.isSet():
            return
        # self.lmc is "host:port"
        (host, port) = self.lmc.split(":")
        self.log(2, "KATCPDaemon::main openSocket(" + host + "," + port + ")")
        try:
            sock = sockets.openSocket(DL, host, int(port), 1)
            if sock:
                # NOTE(review): lmc_cmd is sent without a "\r\n" terminator
                # here, unlike setup_sensors_host — confirm which is intended
                sock.send(self.lmc_cmd)
                lmc_reply = sock.recv(65536)
                xml = xmltodict.parse(lmc_reply)
                sock.close()
                if self.quit_event.isSet():
                    return
                self.log(3, "KATCPDaemon::main update_lmc_sensors(" + host + ",[xml])")
                self.update_lmc_sensors(host, xml)
        except socket.error as e:
            # only a reset by the peer is treated as benign; the socket is
            # closed and the loop retries on the next pass
            if e.errno == errno.ECONNRESET:
                self.log(1, "lmc connection was unexpectedly closed")
                sock.close()

        # connect to SPIP_REPACK to retrieve Pulsar SNR performance
        if self.quit_event.isSet():
            return
        (host, port) = self.repack.split(":")
        try:
            sock = sockets.openSocket(DL, host, int(port), 1)
            if sock:
                sock.send(self.repack_cmd)
                repack_reply = sock.recv(65536)
                xml = xmltodict.parse(repack_reply)
                sock.close()
                if self.quit_event.isSet():
                    return
                self.log(3, "KATCPDaemon::main update_repack_sensors(" + host + ",[xml])")
                self.update_repack_sensors(host, xml)
        except socket.error as e:
            if e.errno == errno.ECONNRESET:
                self.log(1, "repack connection was unexpectedly closed")
                sock.close()

        # sleep in 1s slices so a quit request is honoured promptly
        to_sleep = 5
        while not self.quit_event.isSet() and to_sleep > 0:
            to_sleep -= 1
            time.sleep(1)

        # if the KATCP server itself has died, shut the daemon down
        if not self.katcp.running():
            self.log(-2, "KATCP server was not running, exiting")
            self.quit_event.set()
def issue_start_cmd(self, xml):
    """Issue a START command to the recv streams of each enabled beam.

    For each beam flagged on in the obs_cmd XML, builds an observation header
    from the beam's stored state (source, RA/DEC, TOBS, observer, PID, mode,
    CAL frequency, custom fields), defaults UTC_START to now+5s if unset,
    sends the header to the control port of every stream belonging to the
    beam, and marks the beam "Recording".

    Fixes over previous revision:
    - `utc_start` is now compared with `is None` rather than `== None`.
    - a beam state of "1" is accepted as well as "on", matching the other
      issue_start_cmd/issue_stop_cmd implementations.
    """
    # determine which beams this command corresponds to
    for ibeam in range(int(xml['obs_cmd']['beam_configuration']['nbeam'])):
        state = xml['obs_cmd']['beam_configuration']['beam_state_' + str(ibeam)]['#text']
        if state == "1" or state == "on":
            b = xml['obs_cmd']['beam_configuration']['beam_state_' + str(ibeam)]['@name']
            if b in self.beam_states.keys():
                obs = {}

                # snapshot the beam state under its lock
                self.beam_states[b]["lock"].acquire()

                obs["COMMAND"] = "START"
                obs["SOURCE"] = self.beam_states[b]["source"]
                obs["RA"] = self.beam_states[b]["ra"]
                obs["DEC"] = self.beam_states[b]["dec"]
                obs["TOBS"] = self.beam_states[b]["tobs"]
                obs["OBSERVER"] = self.beam_states[b]["observer"]
                obs["PID"] = self.beam_states[b]["pid"]
                obs["MODE"] = self.beam_states[b]["mode"]
                obs["CALFREQ"] = self.beam_states[b]["calfreq"]
                obs["OBS_OFFSET"] = "0"

                # if no UTC_START has been specified, set it to +5 seconds
                if self.beam_states[b]["utc_start"] is None:
                    self.beam_states[b]["utc_start"] = times.getUTCTime(5)
                obs["UTC_START"] = self.beam_states[b]["utc_start"]

                # inject custom fields into header
                for f in self.beam_states[b]["custom_fields"].split(' '):
                    obs[f.upper()] = self.beam_states[b][f]

                self.beam_states[b]["lock"].release()

                obs["PERFORM_FOLD"] = "1"
                obs["PERFORM_SEARCH"] = "0"
                obs["PERFORM_TRANS"] = "0"

                # convert to a single ascii string
                obs_header = Config.writeDictToString(obs)

                self.log(1, "issue_start_cmd: beam=" + b)

                # work out which streams correspond to these beams
                for istream in range(int(self.cfg["NUM_STREAM"])):
                    # STREAM_<i> is "host:beam_index:subband"
                    (host, beam_idx, subband) = self.cfg["STREAM_" + str(istream)].split(":")
                    beam = self.cfg["BEAM_" + beam_idx]
                    self.log(2, "issue_start_cmd: host=" + host + " beam=" + beam +
                             " subband=" + subband)

                    # connect to streams for this beam only
                    if beam == b:
                        # control port for this recv stream
                        ctrl_port = int(self.cfg["STREAM_CTRL_PORT"]) + istream

                        # connect to recv agent and provide observation configuration
                        self.log(3, "issue_start_cmd: openSocket(" + host + "," +
                                 str(ctrl_port) + ")")
                        recv_sock = sockets.openSocket(DL, host, ctrl_port, 1)
                        if recv_sock:
                            self.log(3, "issue_start_cmd: sending obs_header")
                            recv_sock.send(obs_header)
                            self.log(3, "issue_start_cmd: header sent")
                            recv_sock.close()
                            self.log(3, "issue_start_cmd: socket closed")

                        # connect to spip_gen and issue start command for UTC
                        # assumes gen host is the same as the recv host!
                        # gen_port = int(self.cfg["STREAM_GEN_PORT"]) + istream
                        # sock = sockets.openSocket (DL, host, gen_port, 1)
                        # if sock:
                        #   sock.send(obs_header)
                        #   sock.close()

                # update the dict of observing info for this beam
                self.beam_states[b]["lock"].acquire()
                self.beam_states[b]["state"] = "Recording"
                self.beam_states[b]["lock"].release()