def test_schedules_off(self):
    """ Debug code only. """
    sched0 = (2.0, [2.0], [0.0], time_spec_t(0))
    sched1 = (2.0, [0], [0.0], time_spec_t(0))
    sched_list = [sched0, sched1]
    self.set_schedules(sched_list)
def work(self, input_items, output_items):
    # process streaming samples and tags here
    in0 = input_items[0]
    nread = self.nitems_read(0)  # number of items read on port 0
    ninput_items = len(input_items[0])

    # read all tags associated with port 0 for items in this work function
    tags = self.get_tags_in_range(0, nread, nread + ninput_items)
    if len(tags) > 0:
        self._dev_logger.debug("beacon consumer found new tags")

    for tag in tags:
        key_string = pmt.pmt_symbol_to_string(tag.key)
        if key_string == "rx_time":
            current_integer, current_fractional = pmt.to_python(tag.value)
            self.timestamp = time_spec_t(current_integer + current_fractional)
            self.floored_timestamp = time_spec_t(current_integer)
            self.time_offset = tag.offset
            self.found_time = True
        elif key_string == "rx_rate":
            self.rate = pmt.to_python(tag.value)
            self.sample_period = 1 / self.rate
            self.found_rate = True

    # only clear out old packets if the time and rate are known
    if self.found_rate and self.found_time:
        t_end = (nread + ninput_items - self.time_offset) * self.sample_period \
                + self.timestamp

        self._beacon_lock.acquire()
        self.cull_stale_beacons(t_end)
        num_beacons = len(self._beacon_list)
        self._beacon_lock.release()

        # if there aren't any valid beacons left in the queue, declare that
        # sync was lost
        if num_beacons == 0:
            self.sync_lost(t_end)

    return ninput_items
def __init__(self, item_size, num_outputs, fs, schedule_list=None):
    """
    Selector constructor.

    @param item_size     the size of the gr data stream in bytes
    @param num_outputs   the number of outputs (integer)
    @param fs            sample rate in samples per second
    @param schedule_list list of schedules, one per output.
    """
    gr.hier_block2.__init__(
        self,
        'selector',
        gr.io_signature(1, 1, item_size),
        gr.io_signature(num_outputs, num_outputs, item_size),
    )
    self._dev_logger = logging.getLogger('developer')

    # slot selector blocks
    self.select_blocks = []
    for i in range(num_outputs):
        # note: the original test was hasattr(schedule_list, "len"), which is
        # never true for a list, so the schedule branch never ran; test for a
        # usable schedule entry directly instead
        if schedule_list is not None and len(schedule_list) > i:
            frame_len, slot_lens, slot_offsets, frame_t0, stream_t0 = schedule_list[i]
        else:
            frame_len = 1.333
            if i == 0:
                slot_lens = [1.0]
            else:
                slot_lens = [0.0]
            slot_offsets = [0.0]
            slot_nums = [0]
            frame_t0 = time_spec_t(0)
            stream_t0 = time_spec_t(0)

        self.select_blocks.append(slot_selector(item_size, frame_len, slot_lens,
                                                slot_offsets, frame_t0.int_s(),
                                                frame_t0.frac_s(), stream_t0.int_s(),
                                                stream_t0.frac_s(), fs))
    # connections
    for i in range(num_outputs):
        self.connect(self, self.select_blocks[i], (self, i))

    self.item_size = item_size
    self.num_outputs = num_outputs
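# A minimal usage sketch for the selector block above, with assumed values
# throughout: gr.sizeof_gr_complex items, two outputs, a 250 kHz sample rate,
# and a schedule tuple matching the constructor's unpacking of
# (frame_len, slot_lens, slot_offsets, frame_t0, stream_t0). Illustrative
# only; 'tb', 'usrp_source', and 'demod_chain_0' are hypothetical blocks.
#
#   sched = (1.0, [0.5], [0.0], time_spec_t(0), time_spec_t(0))
#   sel = selector(gr.sizeof_gr_complex, 2, 250e3, schedule_list=[sched])
#   tb.connect(usrp_source, sel)
#   tb.connect((sel, 0), demod_chain_0)  # each output is a gated copy of the input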
def __init__(self, tx_time=None, frame_offset=None, time_ref=None,
             first_frame_num=None, frame_num_ref=None, valid=None):
    '''
    Initialize required data members
    '''
    # always define tx_time and time_ref so later "is not None" checks
    # (e.g. in expand()) don't hit a missing attribute
    if tx_time is not None:
        self.tx_time = time_spec_t(tx_time)
    else:
        self.tx_time = None

    self.frame_offset = frame_offset

    if time_ref is not None:
        self.time_ref = time_spec_t(time_ref)
    else:
        self.time_ref = None

    self.first_frame_num = first_frame_num
    self.frame_num_ref = frame_num_ref
    self.valid = valid
def add_schedule(self, time_ref, frame_num_ref, first_frame_num, action_ind,
                 epoch_num=None):
    '''
    Add a schedule to the end of the schedule queue, and if the queue is over
    capacity, pop off the oldest element
    '''
    self.schedule_seq.insert((time_spec_t(time_ref).to_tuple(), frame_num_ref,
                              first_frame_num, action_ind, epoch_num))

    if len(self.schedule_seq) > self.max_scheds:
        # the collection is kept sorted by first frame number, so the first
        # element is the oldest schedule
        self.schedule_seq.remove(self.schedule_seq[0])
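# Minimal standalone sketch of the capped, sorted schedule queue used above,
# built on the stdlib bisect module instead of SortedCollection (a stand-in
# for illustration only). Entries stay ordered by first_frame_num and the
# oldest entry is dropped once capacity is exceeded.
import bisect

max_scheds = 2
sched_seq = []  # list of (first_frame_num, payload), kept sorted

for first_frame_num in (10, 30, 20):
    bisect.insort(sched_seq, (first_frame_num, "schedule state"))
    if len(sched_seq) > max_scheds:
        sched_seq.pop(0)  # drop the schedule with the smallest frame number

print sched_seq  # [(20, 'schedule state'), (30, 'schedule state')]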
def expand(self):
    '''
    Reverse a compact operation
    '''
    if self.time_ref is not None:
        self.time_ref = time_spec_t(self.time_ref)
    if self.tx_time is not None:
        self.tx_time = time_spec_t(self.tx_time)
    if self.slots is not None:
        for k, slot in enumerate(self.slots):
            self.slots[k] = SlotParamTuple(*slot)
    if self.old_frame_config is not None:
        for k, slot in enumerate(self.old_frame_config["slots"]):
            self.old_frame_config["slots"][k] = SlotParamTuple(*slot)
        self.old_frame_config["t0"] = time_spec_t(self.old_frame_config["t0"])
def add_tx_packets(self, packet_list, frame_num, packet_overhead, types_to_ints):
    '''
    Add a list of packets to the database. Packet list items are tuples of
    (meta, data)
    '''
    # if time_ref hasn't been loaded yet, try to load it
    if self.time_ref is None:
        self.load_time_ref()

    if self.time_ref is None:
        self.dev_log.warning("Could not load time reference from database, " +
                             "so cannot store packets")
        return

    try:
        # add packets to database
        with self.con as c:
            for (meta, data) in packet_list:
                if meta["pktCode"] != types_to_ints["beacon"]:
                    # express the packet timestamp with respect to the
                    # database time reference
                    packet_timestamp = float(time_spec_t(meta["timestamp"])
                                             - self.time_ref)
                    payload_bits = len(data) * 8
                    total_bits = payload_bits + packet_overhead * 8

                    packet_params = (meta["fromID"], meta["toID"], meta["sourceID"],
                                     meta["destinationID"], meta["packetid"],
                                     meta["pktCode"], meta["linkdirection"],
                                     meta["frequency"], meta["timeslotID"],
                                     frame_num, packet_timestamp, "pending",
                                     payload_bits, total_bits)

                    # add to the packets table
                    c.execute("insert into packets" +
                              "(from_id, to_id, source_id, destination_id, packet_num," +
                              " packet_code, link_direction, channel_num, slot_num," +
                              " frame_num, packet_timestamp, status, payload_bits, total_bits) " +
                              "values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
                              packet_params)

    except sqlite3.Error as err:
        self.dev_log.exception("error inserting tx packet: %s.%s: %s",
                               err.__module__, err.__class__.__name__, err.message)
    except KeyError:
        self.dev_log.exception("key error: meta contents: %s ", meta)
        # re-raise the original exception rather than a fresh KeyError so the
        # traceback is preserved
        raise
def __init__(self, flush_db=False, time_ref=None,
             db_name="/tmp/ram/performance_history.sqlite"):

    # fire up logging service
    self.dev_log = logging.getLogger('developer')
    self.x_log = logging.getLogger('exceptions')

    if time_ref is not None:
        self.time_ref = time_spec_t(math.floor(time_ref))
        self.dev_log.debug("setting database time reference to %s", self.time_ref)
    else:
        self.time_ref = None
        self.dev_log.debug("not setting database time at init. This must be done " +
                           "after the database has been initialized by calling " +
                           "load_time_ref()")

    # open database file
    try:
        self.dev_log.debug("connecting to database file %s", db_name)
        self.con = sqlite3.connect(db_name)
        self.dev_log.debug("database connection successful")
    except sqlite3.OperationalError as err:
        self.dev_log.exception("Could not open database file named %s.\n" +
                               "If using a file on a ramdisk, try using:\n" +
                               "mkdir -p /tmp/ram\n" +
                               "sudo mount -t tmpfs -o size=512M tmpfs /tmp/ram\n",
                               db_name)
        quit()

    db_expanded_name = os.path.expandvars(os.path.expanduser(db_name))
    self._db_path = os.path.dirname(os.path.abspath(db_expanded_name))
    self._db_basename = os.path.basename(os.path.abspath(db_expanded_name))

    # use Row wrapper so you can get at rows using field names and/or indexes
    self.con.row_factory = sqlite3.Row
    # make all text ascii only
    self.con.text_factory = str

    if flush_db:
        try:
            self.dev_log.debug("initializing database")
            self.init_database()
            self.dev_log.debug("database initialization complete")
        except Exception as error:
            self.dev_log.exception("Could not initialize the database: Exception %s",
                                   error)
            quit()
def __init__(self, frame_config, tx_time=None, frame_offset=None, time_ref=None,
             first_frame_num=None, frame_num_ref=None, valid=None):
    '''
    Store frame config
    '''
    if frame_config is not None:
        if time_ref is None:
            time_ref = time_spec_t(frame_config["t0"])

    super(SimpleFrameSchedule, self).__init__(tx_time, frame_offset, time_ref,
                                              first_frame_num, frame_num_ref,
                                              valid)
    self._frame_config = deepcopy(frame_config)
def __init__(self, tx_time=None, frame_offset=None, time_ref=None,
             first_frame_num=None, frame_num_ref=None, valid=None,
             frame_config=None, num_channels=1):

    # set up safe defaults that can be overwritten as needed
    self.first_frame_num = 0
    self.frame_num_ref = 0

    if frame_config is not None:
        self.frame_len = frame_config["frame_len"]
        self.slots = deepcopy(frame_config["slots"])
        self.num_time_slots = len(self.slots)
        if time_ref is None:
            self.time_ref = time_spec_t(frame_config["t0"])
        if valid is None:
            self.valid = frame_config["valid"]
        self.old_frame_config = self.compute_frame()

    self.num_freq_slots = num_channels

    # initialize params if specifically called out, otherwise load from frame config
    if time_ref is not None:
        self.time_ref = time_spec_t(time_ref)
    if frame_num_ref is not None:
        self.frame_num_ref = frame_num_ref
    if first_frame_num is not None:
        self.first_frame_num = first_frame_num
    if valid is not None:
        self.valid = valid
def __init__(self, tx_time=None, frame_offset=None, time_ref=None,
             first_frame_num=None, frame_num_ref=None, valid=None, tx_gain=None,
             max_schedules=2, action_ind=None, rf_freq=None, slot_bw=0.0,
             epoch_num=None):

    if tx_time is not None:
        self.tx_time = time_spec_t(tx_time)
    else:
        self.tx_time = None

    self.frame_offset = frame_offset
    self.valid = valid

    # this is the list of schedule states this schedule object knows about.
    # The schedules are ordered by first_frame_num
    self.schedule_seq = SortedCollection(key=itemgetter(2))
    self.max_scheds = max_schedules

    # use a default dict so slots with no initialized gain will use the
    # default tx gain
    self.tx_gain = tx_gain
    self.gains = defaultdict(self.constant_factory(self.tx_gain))

    # set default values for all controllable parameters. These are what will
    # be used if the action space doesn't specify a value
    self.rf_freq = rf_freq
    self.slot_bw = slot_bw

    first_state = (time_ref, frame_num_ref, first_frame_num, action_ind,
                   epoch_num)

    # only add the initial state if all the necessary params are defined
    if all(v is not None for v in first_state):
        self.add_schedule(*first_state)
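# Standalone sketch of the defaultdict-with-default-gain pattern used above.
# constant_factory here is a hypothetical stand-in following the usual Python 2
# recipe; the class's own constant_factory is assumed to behave the same way.
from collections import defaultdict
import itertools

def constant_factory(value):
    return itertools.repeat(value).next  # Python 2 bound method

gains = defaultdict(constant_factory(10.0))  # 10.0 dB default tx gain
gains[(3, "uplink")] = 15.0                  # explicit per-(owner, type) gain
print gains[(3, "uplink")]    # 15.0
print gains[(7, "downlink")]  # 10.0, default created on first access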
def process_raw_incoming_queue(self):
    while not self.raw_incoming_q.empty():
        (ok, payload, timestamp, channel) = self.raw_incoming_q.get()

        # if packet passed CRC
        if ok:
            meta, data = self.mac_sm.unpack_tdma_header(payload=payload)
            # check for None before touching meta fields (the original logged
            # meta["packetid"] first, which would raise on a failed unpack)
            if meta is None:
                meta = {}
            else:
                self.dev_logger.info("Packet %i metadata says channel %i and " +
                                     "was received on channel %i",
                                     meta["packetid"], meta["frequency"], channel)

            meta["crcpass"] = True
            meta["timestamp"] = time_spec_t(timestamp)
            meta["messagelength"] = len(payload)

            # hack to work around frame number wrap around in packet headers
            if self.frame_count - meta["frameID"] > TDMA_HEADER_MAX_FIELD_VAL / 2:
                # compute how many times the frame count has overflowed the
                # packet frame num field
                num_wraps = floor(self.frame_count / TDMA_HEADER_MAX_FIELD_VAL)
                # update the packet metadata to what its actual value likely was
                meta["frameID"] = int(num_wraps * TDMA_HEADER_MAX_FIELD_VAL
                                      + meta["frameID"])
                # make sure we didn't go one wrap too far
                if meta["frameID"] > self.frame_count:
                    meta["frameID"] = meta["frameID"] - TDMA_HEADER_MAX_FIELD_VAL

            if ("toID" in meta) and ("packetid" in meta) and ("pktCode" in meta):
                if meta["toID"] != self.mac_config["my_id"]:
                    self.dev_logger.warning("received packet number %d, type %d, " +
                                            "in slot %d, frame %d, addressed to " +
                                            "%d at timestamp %s",
                                            meta["packetid"], meta["pktCode"],
                                            meta["timeslotID"], meta["frameID"],
                                            meta["toID"], meta["timestamp"])

            if len(self.incoming_q) < self.incoming_q.maxlen:
                self.incoming_q.append((meta, data))
            else:
                # if dropping the packet, also log it as a drop
                meta["direction"] = "drop"
                # if "frequency" in meta.keys():
                #     meta["frequency"] = self.convert_channel_to_hz(meta["frequency"])
                meta_copy = deepcopy(meta)
                self.ll_logging.packet(meta_copy)

        # crc failed
        else:
            meta = {"crcpass": False}
            meta["timestamp"] = time_spec_t(timestamp)
            meta["messagelength"] = len(payload)
            data = None
            # add in packet so we can get a more accurate BER estimate
            self.incoming_q.append((meta, data))

        # add other metadata not in the over the air packet
        meta["linkdirection"] = self.rx_pkt_dir
        meta["direction"] = "receive"

        # always log that we received the packet
        # if "frequency" in meta.keys():
        #     meta["frequency"] = self.convert_channel_to_hz(meta["frequency"])
        meta_copy = deepcopy(meta)
        self.ll_logging.packet(meta_copy)
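# Standalone sketch of the frame-number unwrap hack above. The header frame
# field wraps at TDMA_HEADER_MAX_FIELD_VAL; the value used here (256) is
# hypothetical, for illustration only, not the project's actual constant.
from math import floor

TDMA_HEADER_MAX_FIELD_VAL = 256  # assumed wrap point

def unwrap_frame_num(header_frame_num, local_frame_count):
    if local_frame_count - header_frame_num > TDMA_HEADER_MAX_FIELD_VAL / 2:
        num_wraps = floor(local_frame_count / TDMA_HEADER_MAX_FIELD_VAL)
        header_frame_num = int(num_wraps * TDMA_HEADER_MAX_FIELD_VAL
                               + header_frame_num)
        # back off one wrap if the estimate overshot the local frame count
        if header_frame_num > local_frame_count:
            header_frame_num -= TDMA_HEADER_MAX_FIELD_VAL
    return header_frame_num

print unwrap_frame_num(3, 515)  # 515: header value 3 maps back to frame 515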
def add_rx_packets(self, packet_list, packet_overhead, status, types_to_ints):
    '''
    Add a list of packets to the database. Packet list items are tuples of
    (meta, data)
    '''
    # if time_ref hasn't been loaded yet, try to load it
    if self.time_ref is None:
        self.load_time_ref()

    if self.time_ref is None:
        self.dev_log.warning("Could not load time reference from database, " +
                             "so cannot store packets")
        return

    try:
        # add packets to database
        with self.con as c:
            for (meta, data) in packet_list:
                # express the packet timestamp with respect to the database
                # time reference
                packet_timestamp = float(time_spec_t(meta["timestamp"])
                                         - self.time_ref)
                packet_payload_bits = len(data) * 8
                packet_total_bits = packet_payload_bits + packet_overhead * 8

                packet_params = (meta["fromID"], meta["toID"], meta["sourceID"],
                                 meta["destinationID"], meta["packetid"],
                                 meta["pktCode"], meta["linkdirection"],
                                 meta["frequency"], meta["timeslotID"],
                                 meta["frameID"], packet_timestamp, status,
                                 packet_payload_bits, packet_total_bits)

                # add slot byte info so we can update the dummy packets in
                # the database with accurate numbers of bytes sent
                slot_bytes_params = (meta["slot_payload_bytes"] * 8,
                                     meta["slot_total_bytes"] * 8,
                                     packet_payload_bits,
                                     packet_total_bits,
                                     meta["frameID"],
                                     meta["timeslotID"],
                                     meta["frequency"])

                # add to the packets table
                c.execute("insert into packets" +
                          "(from_id, to_id, source_id, destination_id, packet_num," +
                          " packet_code, link_direction, channel_num, slot_num," +
                          " frame_num, packet_timestamp, status, payload_bits, total_bits) " +
                          "values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
                          packet_params)

                # update the pending rx slot with the bytes actually received
                c.execute("""
                          UPDATE pending_rx_slots
                          SET payload_bits=?,
                              total_bits=?,
                              passed_payload_bits = pending_rx_slots.passed_payload_bits + ?,
                              passed_total_bits = pending_rx_slots.passed_total_bits + ?
                          WHERE frame_num=? AND slot_num=? AND channel_num=?""",
                          slot_bytes_params)

    except sqlite3.Error as err:
        self.dev_log.exception("error inserting rx packet: %s.%s: %s",
                               err.__module__, err.__class__.__name__, err.message)
def process_command_queue(self):
    self.reservation.acquire()

    if len(self.time_gain_tuple_list) > 0:
        # avoid calling uhd get_time_now(): estimate the current uhd time from
        # the host time and the known time-ahead offset
        t1 = time.time() - self.current_time_ahead
        current_time_spec_t_uhd_time = time_spec_t(t1)

        pop_list = []
        m = 0  # number of stale commands dropped
        n = 0  # number of commands submitted to the UHD queue

        for idx, (cmd_time, cmd_value, cmd_type) in enumerate(self.time_gain_tuple_list):
            if cmd_type == "time_cal":
                # time_cal commands arrive very close to the current time and
                # were once used to nudge current_time_ahead by +/- 1 s when
                # the command time was more than half a second off; that
                # adjustment is disabled, so the command is simply consumed
                pop_list.append(idx)
                n = n + 1
            elif cmd_time <= current_time_spec_t_uhd_time:
                # stale command: remove it without executing
                pop_list.append(idx)
                m = m + 1
                if cmd_type == "txrx_tune":
                    self.dev_log.warn("Tune command time of %s is stale. " +
                                      "Current time is %s", cmd_time,
                                      current_time_spec_t_uhd_time)
                    self.log_tune_command(cmd_time, cmd_value, True)
            elif (cmd_time - current_time_spec_t_uhd_time) <= self.period:
                # command comes due within this polling period: submit it to
                # the UHD timed-command queue
                uhd_cmd_time_builder = uhd_time_spec_t_builder(cmd_time.int_s(),
                                                               cmd_time.frac_s())
                uhd_cmd_time = uhd_cmd_time_builder.time_spec_t()

                if cmd_type == 'tx_gain':
                    self.uhd_sink.u.set_command_time(uhd_cmd_time)
                    self.uhd_sink.u.set_gain(cmd_value, 0)
                    self.uhd_sink.u.clear_command_time()
                elif cmd_type == "txrx_tune":
                    self.dev_log.debug("Tuning tx and rx to %f at command time " +
                                       "%s, current time %s", cmd_value, cmd_time,
                                       current_time_spec_t_uhd_time)
                    self.uhd_sink.u.set_command_time(uhd_cmd_time)
                    self.uhd_sink.u.set_center_freq(uhd.tune_request(cmd_value, 10e6), 0)
                    self.uhd_sink.u.clear_command_time()

                    self.uhd_source.u.set_command_time(uhd_cmd_time)
                    self.uhd_source.u.set_center_freq(uhd.tune_request(cmd_value, 10e6), 0)
                    self.uhd_source.u.clear_command_time()

                    self.log_tune_command(cmd_time, cmd_value, False)

                pop_list.append(idx)
                n = n + 1
            else:
                # assume the command time list is in increasing order
                break

        # pop_list must be sorted in reverse, otherwise earlier pops shift the
        # indexes of later ones
        for idx in sorted(pop_list, reverse=True):
            self.time_gain_tuple_list.pop(idx)

        if m > 0:
            self.dev_log.debug("Dropped %i stale commands to UHD queue", m)

        # if there are too many drops, reset the entire queue so there's a
        # chance to recover
        if m >= self.max_drops:
            self.time_gain_tuple_list = []

    self.reservation.release()
def parse_frame_file(frame_file, t0, fs):
    logger = logging.getLogger('developer')

    slot_fields = dict([("owner", "int"),
                        ("len", "float"),
                        ("offset", "float"),
                        ("type", "string"),
                        ("rf_freq", "float"),
                        ("bb_freq", "int"),
                        ("bw", "float")])

    parser = etree.XMLParser(dtd_validation=True)

    filepath = os.path.expandvars(os.path.expanduser(frame_file))
    filepath = os.path.abspath(filepath)

    xml = etree.parse(filepath, parser)
    root = xml.getroot()

    # convert xml tree to dictionary
    raw_frame = etree_to_dict(root)
    raw_frame = raw_frame["frame"]

    frame_config = {}

    # build up frame config dictionary
    frame_config["t0"] = time_spec_t(t0)
    frame_config["frame_len"] = float(raw_frame["frame_len"])

    # convert each field of each slot to the correct type
    raw_frame["slots"] = [convert_types(slot, slot_fields)
                          for slot in raw_frame["slots"]]

    # add a placeholder tx_gain field to each slot
    for k in range(len(raw_frame["slots"])):
        raw_frame["slots"][k]["tx_gain"] = 0

    # now store off all the slots as a list of named tuples
    frame_config["slots"] = [SlotParamTuple(**slot) for slot in raw_frame["slots"]]

    # sort slots by order of offset
    frame_config["slots"].sort(key=lambda slot: slot.offset)

    #
    # enforce that slot/frame boundaries occur at integer samples
    #

    # check that t0 is at an integer sample
    t0_frac = frame_config["t0"].frac_s()
    t0_frac_rounded = round(t0_frac * fs) / fs
    if t0_frac != t0_frac_rounded:
        logger.warn("rounding fractional seconds from %.15f to %.15f",
                    t0_frac, t0_frac_rounded)
        frame_config["t0"] = time_spec_t(frame_config["t0"].int_s(),
                                         t0_frac_rounded)

    # check that frame len is at an integer sample
    frame_len_rounded = round(frame_config["frame_len"] * fs) / fs
    if frame_config["frame_len"] != frame_len_rounded:
        logger.warn("rounding frame len from %.15f to %.15f",
                    frame_config["frame_len"], frame_len_rounded)
        frame_config["frame_len"] = frame_len_rounded

    try:
        # do a limited amount of error checking
        for num, slot in enumerate(frame_config["slots"]):
            offset_rounded = round(slot.offset * fs) / fs
            len_rounded = round(slot.len * fs) / fs

            if slot.offset != offset_rounded:
                logger.warn("rounding slot %d offset from %.15f to %.15f",
                            num, slot.offset, offset_rounded)
            if slot.len != len_rounded:
                logger.warn("rounding slot %d len from %.15f to %.15f",
                            num, slot.len, len_rounded)

            # more precision fun
            end_of_slot = round((offset_rounded + len_rounded) * fs) / fs
            if end_of_slot > frame_config["frame_len"]:
                raise InvalidFrameError(("slot %d with offset %f and len %f " +
                                         "extends past the end of the frame, len %f")
                                        % (num, slot.offset, slot.len,
                                           frame_config["frame_len"]))

            frame_config["slots"][num] = slot._replace(offset=offset_rounded,
                                                       len=len_rounded)

    except InvalidFrameError as err:
        logger.error("Invalid Frame: %s", err.msg)
        raise

    return frame_config
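# Quick standalone illustration of the sample-grid rounding used throughout
# parse_frame_file: a time is snapped to the nearest integer sample at rate fs
# by round(t * fs) / fs. The values here are arbitrary.
fs = 64e3                # 64 kHz sample rate, for illustration
slot_len = 0.01000001    # nominal slot length in seconds
slot_len_rounded = round(slot_len * fs) / fs
print slot_len_rounded   # 0.01, i.e. exactly 640 samples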
def compute_frame(self, frame_num=None):
    '''
    Given a frame number, produce an individual frame configuration
    '''
    if frame_num is None:
        sched = self.stateTup(*self.schedule_seq[0])
    else:
        try:
            sched_tup = self.schedule_seq.find_le(frame_num)
        except ValueError:
            sched_tup = self.schedule_seq[0]
        sched = self.stateTup(*sched_tup)

    action = self._action_space[sched.action_ind]

    # extrapolate the start of this frame from the schedule's time reference
    frame_len = action["frame_len"]
    frame_delta = frame_num - sched.frame_num_ref
    t0 = time_spec_t(sched.time_ref) + frame_len * frame_delta

    frame_config = {"frame_len": frame_len,
                    "t0": t0,
                    "t0_frame_num": frame_num,
                    "first_frame_num": sched.first_frame_num,
                    "valid": self.valid,
                    "epoch_num": sched.epoch_num}

    # get all the parameters needed for computing each slot in frame_config
    if "rf_freq" in action:
        rf_freq = action["rf_freq"]
    else:
        rf_freq = self.rf_freq

    # get the list of gains per slot
    act_slots = action["slots"]
    gains = [self.gains[(s.owner, s.type)] for s in act_slots]

    slots = [SlotParamTuple(owner=s.owner,
                            len=s.len,
                            offset=s.offset,
                            type=s.type,
                            rf_freq=rf_freq,
                            bb_freq=s.bb_freq,
                            bw=self.slot_bw,
                            tx_gain=gain)
             for gain, s in zip(gains, act_slots)]

    frame_config["slots"] = slots

    return frame_config
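# Worked example of the frame-time arithmetic above: a frame's t0 is
# extrapolated from the schedule's reference time by whole frame lengths.
# Numbers are arbitrary and plain floats stand in for time_spec_t.
frame_len = 2.0      # seconds per frame
time_ref = 1000.0    # schedule reference time
frame_num_ref = 50   # frame number at time_ref

frame_num = 53
t0 = time_ref + frame_len * (frame_num - frame_num_ref)
print t0  # 1006.0: frame 53 starts three frame lengths after frame 50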
def time_ref(self):
    '''Time reference of the most recently added schedule state.'''
    return time_spec_t(self.schedule_seq[-1][0])
def work(self, input_items, output_items):
    # process streaming samples and tags here
    in0 = input_items[0]
    nread = self.nitems_read(0)  # number of items read on port 0
    ninput_items = len(input_items[0])

    # update the starting timestamp for this block
    start_timestamp = self.ref_timestamp + (nread - self.ref_time_offset) / self.fs

    # read all tags associated with port 0 for items in this work function
    tags = self.get_tags_in_range(0, nread, nread + ninput_items)

    # find all of our tags, making the appropriate adjustments to our timing
    for tag in tags:
        key_string = pmt.pmt_symbol_to_string(tag.key)
        if key_string == "rx_time":
            self.ref_time_offset = tag.offset
            self.ref_timestamp = time_spec_t(pmt.to_python(tag.value))

            # only set the host offset at the start
            if not self.found_time:
                current_time = time.time()
                current_time_ahead = time_spec_t(current_time) - self.ref_timestamp
                self.mac_sm.cq_manager.set_current_time_ahead(float(current_time_ahead))
                self.dev_logger.debug("for rx time %s and host time %s, setting " +
                                      "time ahead to %s", self.ref_timestamp,
                                      current_time, current_time_ahead)

            self.found_time = True
            self.dev_logger.debug("tdma_controller found new rx time of %s at offset %ld",
                                  self.ref_timestamp, self.ref_time_offset)

            # if this tag occurs at the start of the sample block, update the
            # starting timestamp
            if tag.offset == nread:
                start_timestamp = self.ref_timestamp + \
                    (nread - self.ref_time_offset) / self.fs

        elif key_string == "rx_rate":
            self.fs = pmt.to_python(tag.value)
            self.found_rate = True

            # if this tag occurs at the start of the sample block, update the
            # starting timestamp
            if tag.offset == nread:
                start_timestamp = self.ref_timestamp + \
                    float(nread - self.ref_time_offset) / self.fs

    if not (self.current_sched is None):
        start_timestamp = start_timestamp.round_to_sample(self.fs,
                                                          self.current_sched["t0"])

    # determine the first transmit slot when we learn the time
    if not self.know_time:
        if self.found_time and self.found_rate:
            self.know_time = True
            # if the state machine has a command queue manager, send out a
            # time cal message
            if hasattr(self.mac_sm, "cq_manager"):
                # calibrate the command queue to uhd timing errors
                cal_ts = self.ref_timestamp + \
                    float(nread + ninput_items - self.ref_time_offset) / self.fs
                self.mac_sm.cq_manager.add_command_to_queue([(cal_ts, 0, "time_cal")])

    if self.know_time:
        # set the mac to generate packets if the start of the frame occurs at
        # any point between now and the end of the current block plus the lead
        # limit. This should guarantee that packets are always submitted at
        # least one lead limit ahead of their transmit time
        end_timestamp = self.ref_timestamp + self.mac_config["lead_limit"] + \
            float(nread + ninput_items - self.ref_time_offset) / self.fs
    else:
        end_timestamp = start_timestamp

    if not (self.current_sched is None):
        end_timestamp = end_timestamp.round_to_sample(self.fs,
                                                      self.current_sched["t0"])

    # only update the current timestamp if it is further along than the state
    # machine
    if self.current_timestamp < start_timestamp:
        self.current_timestamp = start_timestamp

    # use this to detect endless loops
    loop_counter = 0
    loop_max = 100
    last_ts = self.current_timestamp

    # grab the latest schedule updates from the thread safe data struct
    num_scheds = len(self.in_sched_update_q)
    for k in range(num_scheds):
        self.sched_seq.append(self.in_sched_update_q.popleft())

    # run the state machine if time cal is complete
    if self.time_cal_complete:
        # handle any incoming packets
        self.process_raw_incoming_queue()

        # start timers
        if self.monitor_timing == True:
            wall_start_ts = time.time()
            state_start_ts = self.current_timestamp

        outp = None

        # iterate the state machine until the current timestamp exceeds the
        # ending timestamp
        while self.current_timestamp < end_timestamp:
            last_ts = self.current_timestamp

            rf_in = []
            while len(self.incoming_q) > 0:
                rf_in.append(self.incoming_q.popleft())

            inp = {
                "app_in": self.app_in_q,
                "current_ts": self.current_timestamp,
                "end_ts": end_timestamp,
                "frame_config": self.frame_config,
                "frame_count": self.frame_count,
                "mac_config": self.mac_config,
                "packet_count": self.packet_count,
                "pkt_switch_queues": self.pkt_switch_queues,
                "plot_lock": self.plot_lock,
                "rf_in": rf_in,
                "sched_seq": self.sched_seq,
            }

            outp = self.mac_sm.step((inp, False))

            # handle outputs
            self.tx_frames(**outp)
            self.send_commands(**outp)
            self.send_app_pkts(**outp)
            self.log_dropped_pkts(**outp)
            self.log_mac_behavior(inp, outp)

            # update node state with results
            self.current_timestamp = time_spec_t(outp["current_ts"])
            self.packet_count = outp["packet_count"]
            self.pkt_switch_queues = outp["pkt_switch_queues"]
            self.frame_count = outp["frame_count"]
            self.frame_config = outp["frame_config"]
            self.sched_seq = outp["sched_seq"]

            if last_ts == self.current_timestamp:
                loop_counter += 1
            else:
                loop_counter = 0

            if loop_counter > loop_max:
                self.dev_logger.warn("INFINITE (PROBABLY) LOOP DETECTED - " +
                                     "breaking out after %d loops", loop_counter)
                self.dev_logger.warn("current timestamp is: %s end timestamp is %s",
                                     self.current_timestamp, end_timestamp)
                break

        # do timer calcs at end of work function
        if self.monitor_timing == True:
            wall_end_ts = time.time()
            # if the state machine wasn't executed at least once, outp won't
            # be defined, so assign something reasonable to state_end_ts
            if not (outp is None):
                state_end_ts = time_spec_t(outp["current_ts"])
            else:
                state_end_ts = state_start_ts

            wall_delta_ts = wall_end_ts - wall_start_ts
            state_delta_ts = float(state_end_ts - state_start_ts)

            self.state_time_deltas += state_delta_ts
            self.wall_time_deltas += wall_delta_ts

            if self.state_time_deltas >= self.poll_interval:
                self.dev_logger.info("runtime ratio was %f wall seconds per state second",
                                     self.wall_time_deltas / self.state_time_deltas)
                self.state_time_deltas = 0
                self.wall_time_deltas = 0

    # we're still in time cal
    elif self.do_time_cal:
        if not self.know_time:
            self.dev_logger.error("The base station does not know its own time. " +
                                  "Cannot calibrate")
        elif not self.mac_sm.is_base():
            self.dev_logger.error("Only base nodes can send time calibration beacons")
        else:
            # send out cal beacon frames
            for k in range(self.num_cal_beacons):
                packet_count = self.packet_count
                frame_count = 0

                frame_ts = (self.current_timestamp + self.mac_config["lead_limit"] +
                            k * self.cal_frame_config["frame_len"])
                # round the fractional part to an integer sample so we don't
                # break the slot selector
                frame_ts = time_spec_t(frame_ts.int_s(),
                                       round(frame_ts.frac_s() * self.fs) / self.fs)

                config = self.mac_config
                mobile_queues = defaultdict(deque)

                # make mac beacon frames
                outs = self.manage_slots.send_frame(self.mac_config,
                                                    self.cal_frame_config,
                                                    self.cal_schedule,
                                                    frame_count,
                                                    packet_count,
                                                    frame_ts,
                                                    mobile_queues)
                frame_count, packet_count, tx_list, mobile_queues, dropped_pkts = outs

                # handle outputs
                self.packet_count = packet_count

                # filter out anything that's not a beacon
                tx_list = [x for x in tx_list
                           if x[0]["pktCode"] == self.mac_sm._types_to_ints["beacon"]]

                # add tdma headers to all the packets in the tx list
                tx_list = [(meta, self.mac_sm.pack_tdma_header(data, **meta))
                           for meta, data in tx_list]

                # send packets
                self.tx_frames(tx_list)

            self.current_timestamp = (end_timestamp + self.mac_config["lead_limit"] +
                                      self.num_cal_beacons * self.cal_frame_config["frame_len"])
            self.do_time_cal = False

    return ninput_items
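# Standalone sketch of the no-progress guard in the state machine loop above:
# if an iteration fails to advance the timestamp too many times in a row, bail
# out instead of spinning forever. Values are arbitrary.
loop_counter = 0
loop_max = 100
current_ts = 5.0
end_ts = 10.0

def step(ts):
    return ts  # a stalled state machine that never advances, for illustration

while current_ts < end_ts:
    last_ts = current_ts
    current_ts = step(current_ts)
    if last_ts == current_ts:
        loop_counter += 1   # no forward progress this iteration
    else:
        loop_counter = 0    # progress made, so reset the guard
    if loop_counter > loop_max:
        print "probable infinite loop detected - breaking out"
        break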
def beacon_callback(self, ok, payload, timestamp, channel):
    beacon_ts = time_spec_t(*timestamp)

    # do timing
    if self.monitor_timing == True:
        wall_start_ts = time.time()
        if self.wall_time_window_start is None:
            self.wall_time_window_start = wall_start_ts

    self._dev_logger.debug("beacon callback called for packet timestamp %s",
                           beacon_ts)
    if ok:
        self._dev_logger.debug("beacon passes CRC")
        meta, data = unpack_payload(payload)
        if meta is None:
            meta = {}
        meta["crcpass"] = True
        beacon_data = None

        # check if this is a beacon packet
        if self._ints_to_types[meta["pktCode"]] == "beacon":
            if meta["fromID"] == self._base_id:
                # note: the original used self.dev_log here, which this class
                # doesn't define; use the class's _dev_logger
                self._dev_logger.info("beacon found on channel %i", channel)
                beacon_data = self.process_as_beacon(meta, data)
            else:
                self._dev_logger.warn("Dropping beacon packet with incorrect fromID")
        else:
            beacon_data = None

        # beacons should have a different access code from data packets, so
        # only beacons should come through here. However, if something makes
        # it through without a packet type of beacon, or if there's a problem
        # extracting the beacon, beacon_data will be None
        if beacon_data is not None:
            self._dev_logger.debug("beacon with tx timestamp %s found at timestamp %s",
                                   beacon_data.tx_time, beacon_ts)
            self._dev_logger.debug("waiting to acquire beacon lock")
            self._beacon_lock.acquire()
            self._dev_logger.debug("beacon lock acquired")

            # only add the beacon if its timing error is below some threshold
            ts_error = float(beacon_ts - beacon_data.tx_time)
            self._dev_logger.info("beacon_timing_error:%f error_thresh:%f",
                                  ts_error, self._beacon_error_thresh)

            if self.in_time_cal and not self.time_cal_is_successful():
                self._dev_logger.debug("running time calibration")
                self.do_time_calibration(ts_error)
            elif not self.in_time_cal:
                # overwrite packet metadata with measured values
                if self._overwrite_metadata:
                    meta["frequency"] = channel

                if abs(ts_error) < self._beacon_error_thresh:
                    self._beacon_list.append((beacon_ts, meta, beacon_data))
                    self._dev_logger.debug("adding beacon to list. in time cal: %s " +
                                           "time cal is successful: %s",
                                           self.in_time_cal,
                                           self.time_cal_is_successful())
                else:
                    self._dev_logger.warning(("rejecting beacon: error of %f s " +
                                              "was too large. Tx timestamp %s " +
                                              "found at timestamp %s"), ts_error,
                                             beacon_data.tx_time, beacon_ts)

            self._dev_logger.debug("culling stale beacons: current time %s, timeout %s",
                                   beacon_ts, self._beacon_timeout)
            self.cull_stale_beacons(beacon_ts)

            # get a copy of the beacon list so we can release the beacon lock
            # and process the beacon list without blocking other threads
            beacon_list = list(self._beacon_list)
            self._beacon_lock.release()
            self._dev_logger.debug("beacon lock released")

            # if all the beacons in the list go stale, declare sync lost
            if len(beacon_list) == 0:
                self.sync_lost(beacon_ts)

            # if we've met the threshold for the number of beacons received to
            # say we have sync, set the flag
            if not self._has_sync:
                if len(beacon_list) >= self._min_beacons:
                    self.sync_acquired()

            # if we have sync, compute the schedule for the current set of
            # beacons
            if self._has_sync:
                self.compute_schedule(beacon_list)
    else:
        self._dev_logger.debug("beacon failed crc")
        meta = {"crcpass": False}

    # log packet
    meta["timestamp"] = time_spec_t(timestamp)
    meta["linkdirection"] = "down"
    meta["direction"] = "receive"
    meta["messagelength"] = len(payload)
    # always log that we received the packet once we're out of time cal
    self._ll_logging.packet(meta)

    # do timer calcs at end of callback
    if self.monitor_timing == True:
        wall_end_ts = time.time()
        wall_delta_ts = wall_end_ts - wall_start_ts
        self.wall_time_deltas.append(wall_delta_ts)

        if wall_end_ts - self.wall_time_window_start >= self.poll_interval:
            if len(self.wall_time_deltas) > 0:
                self._dev_logger.info("average processing time was %f wall seconds per beacon",
                                      numpy.mean(self.wall_time_deltas))
                self._dev_logger.info("max processing time was %f wall seconds per beacon",
                                      max(self.wall_time_deltas))
            self.wall_time_deltas = []
            self.wall_time_window_start = wall_end_ts
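# Standalone sketch of the beacon acceptance gate above: a beacon is kept only
# if the difference between its receive timestamp and its advertised transmit
# timestamp is within the error threshold. Plain floats and an arbitrary
# threshold, for illustration.
beacon_error_thresh = 0.005  # seconds

def accept_beacon(rx_ts, tx_ts):
    ts_error = rx_ts - tx_ts
    return abs(ts_error) < beacon_error_thresh

print accept_beacon(1000.004, 1000.002)  # True: 2 ms error
print accept_beacon(1000.020, 1000.002)  # False: 18 ms error is too large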
def __init__(self, options, mac_sm, manage_slots, fs, mux_name,
             rx_channelizer_name, fhss_flag=0, start_time=None, plot_lock=None):
    """
    Inputs: complex stream from USRP, app_in, pkt in
    Outputs: pkt out, app_out
    """
    gr.sync_block.__init__(
        self,
        name="tdma_controller",
        in_sig=[numpy.complex64],
        out_sig=None,
    )

    # set up loggers
    self.ll_logging = lincolnlog.LincolnLog(__name__)
    self.dev_logger = logging.getLogger('developer')
    self.dev_logger.debug("tdma controller init")

    self.plot_lock = plot_lock

    # TODO: get these from mac/phy?
    macCode = 1
    phyCode = 0

    # store off any inputs we'll need later
    self.fs = float(fs)
    self.mac_sm = mac_sm
    self.mux_name = mux_name
    self.rx_channelizer_name = rx_channelizer_name
    self.start_time = start_time

    self.monitor_timing = True
    if self.monitor_timing == True:
        self.state_time_deltas = 0
        self.wall_time_deltas = 0
        self.poll_interval = 5

    if fhss_flag:
        self.number_digital_channels = options.digital_freq_hop_num_channels
    else:
        self.number_digital_channels = 1

    # round pre_guard to the nearest sample: probably ok to do this at the
    # upsampled sample rate, but doing it at the rate given in the fs param
    # for now
    self.pre_guard = round(options.slot_pre_guard * self.fs) / self.fs
    if self.pre_guard != options.slot_pre_guard:
        self.dev_logger.warn("Rounding pre_guard from %.15f to %.15f",
                             options.slot_pre_guard, self.pre_guard)

    # store off option parameters that will be needed later
    self.frame_file = options.frame_file
    self.max_app_in_q_size = options.mac_tx_packet_q_depth
    self.max_incoming_q_size = options.phy_rx_packet_q_depth

    # Queue to hold packets coming from the application layer prior to processing
    self.app_in_q = deque([], self.max_app_in_q_size)
    # Queue to hold packets coming from the rf interface prior to processing
    self.raw_incoming_q = Queue.Queue()
    # queue to hold incoming packets after initial processing
    self.incoming_q = deque([], self.max_incoming_q_size)
    # Queue for schedule updates from the beacon consumer
    self.in_sched_update_q = deque()

    # dictionary mapping between node ID and the queue holding packets
    # addressed to that ID. Using defaultdict so that deques for new toIDs are
    # automatically created as needed. In general, mobiles will only have one
    # queue, since they only communicate directly with the base, but bases
    # will have one queue per associated mobile
    self.pkt_switch_queues = defaultdict(deque)

    if start_time is None:
        self.start_time = ceil(time.time())
    else:
        self.start_time = start_time

    # reference timestamp used with time_offset and fs to compute the current
    # time based on the number of samples received
    self.ref_timestamp = time_spec_t(0)
    self.ref_time_offset = 0
    self.current_timestamp = time_spec_t(0)

    # used for adding a packet id field to packets
    self.packet_count = 0
    self.frame_count = 0

    self.found_time = False
    self.found_rate = False
    self.know_time = False
    self.time_cal_complete = False
    self.do_time_cal = False
    self.num_cal_beacons = 0
    self.beacon_channel = options.gpsbug_cal_channel

    # don't propagate any tags
    self.set_tag_propagation_policy(gr.gr_block.TPP_DONT)

    # register output message ports
    self.message_port_register_out(OUTGOING_PKT_PORT)
    self.message_port_register_out(TO_APP_PORT)
    self.message_port_register_out(COMMAND_OUT_PORT)

    # register input message ports
    self.message_port_register_in(FROM_APP_PORT)
    # self.message_port_register_in(INCOMING_PKT_PORT)
    self.message_port_register_in(SCHEDULE_IN_PORT)

    # register message handlers for input ports
    self.set_msg_handler(FROM_APP_PORT, self.handle_app_pkt)
    # self.set_msg_handler(INCOMING_PKT_PORT, self.handle_incoming_pkt)
    self.set_msg_handler(SCHEDULE_IN_PORT, self.handle_schedule_update)

    # the state machine should only be started by the top level node once
    # calibration is complete
    # self.mac_sm.start()

    # define the link direction to assign to received packets; the same slot
    # manager is used for both node types
    if self.mac_sm.is_base():
        self.rx_pkt_dir = "up"
    else:
        self.rx_pkt_dir = "down"
    self.manage_slots = manage_slots

    # list for the current set of schedules the state machine is operating on
    self.sched_seq = []
    self.frame_config = None
    self.current_sched = None
    self.last_frame_slots = None

    # TODO: LOG THESE
    self.mac_config = {
        "app_in_q_size": self.max_app_in_q_size,
        "base_id": options.base_station_mac_address,
        "bits_per_symbol": 1,  # TODO: always true for GMSK...will have to fix later
        "fhss_flag": fhss_flag,
        "fs": self.fs,
        "lead_limit": options.frame_lead_limit,
        "macCode": macCode,
        "mux_command": self.mux_name + ".set_schedules",
        "my_id": options.source_mac_address,
        "number_digital_channels": options.digital_freq_hop_num_channels,
        "beacon_channel": options.gpsbug_cal_channel,
        "peer_ids": options.sink_mac_addresses,
        "phyCode": phyCode,
        "pre_guard": self.pre_guard,
        "rx_channelizer_command": self.rx_channelizer_name + ".channelizer_command",
        "rx_channelizer_return_to_beacon": self.rx_channelizer_name + ".return_to_beacon_channel",
        "samples_per_symbol": options.modulation_samples_per_symbol,
        "slot_manager": self.manage_slots,
    }

    self.dev_logger.debug("tdma controller init complete")