def _check_pending(self):
    """Scan from the last check time to now and retry expired requests.

    Walks every expiration bucket between ``self._last_check_time`` and
    the current time; any request whose deadline has passed and that is
    not yet marked completed is re-sent and queued for removal from its
    old bucket.

    Raises
    ------
    Exception
        If a request has already been tried MAXIMUM_RETRIES times.
    """
    time_now = Time.now()
    timeout_delta = Time(SBP_FILEIO_TIMEOUT)
    for check_time in Time.iter_since(self._last_check_time, time_now):
        pending_reqs = self._expire_map[check_time]
        retried_writes = []
        # Iterate a snapshot: the bucket can be mutated concurrently
        # while retries are in flight.
        for pending_req in list(pending_reqs):
            time_expire = pending_req.time + timeout_delta
            if time_now >= time_expire:
                if pending_req.tries >= MAXIMUM_RETRIES:
                    raise Exception('Timed out')
                # If the completion map becomes inconsistent (because
                # things are completing at the same time they're
                # being re-tried) then the `completed` field should
                # prevent us from re-sending a write in this case.
                if not pending_req.completed:
                    self._retry_send(check_time, pending_req, retried_writes)
        # Pending writes can be marked completed while this function
        # is running, so a key error means it was marked completed
        # after we sent a retry (therefore _try_remove_keys ignores
        # key errors).
        self._try_remove_keys(self._expire_map[check_time], *retried_writes)
    self._last_check_time = time_now
def test_time():
    """Sanity-check Time.now(): epoch seconds are nonzero, and two
    consecutive samples should not both land exactly on 0 ms."""
    first = Time.now()
    second = Time.now()
    # On an epoch clock, seconds are effectively never zero.
    assert all(t._seconds != 0 for t in (first, second))
    # One sample could hit 0 milliseconds, but almost certainly not both.
    assert not (first._millis == 0 and second._millis == 0)
def _check_pending(self):
    """Scan from the last check time to now and retry expired requests.

    Walks every expiration bucket between ``self._last_check_time`` and
    the current time; any request whose deadline has passed and that is
    not yet marked completed is re-sent and queued for removal from its
    old bucket.

    Raises
    ------
    Exception
        If a request has already been tried MAXIMUM_RETRIES times.
    """
    time_now = Time.now()
    timeout_delta = Time(SBP_FILEIO_TIMEOUT)
    for check_time in Time.iter_since(self._last_check_time, time_now):
        pending_reqs = self._expire_map[check_time]
        retried_writes = []
        # BUGFIX: iterate a snapshot instead of pending_reqs.keys().
        # The comments below acknowledge that entries can be completed
        # (removed) concurrently while this loop runs; iterating the
        # live key view would then raise "dictionary changed size
        # during iteration".
        for pending_req in list(pending_reqs):
            time_expire = pending_req.time + timeout_delta
            if time_now >= time_expire:
                if pending_req.tries >= MAXIMUM_RETRIES:
                    raise Exception('Timed out')
                # If the completion map becomes inconsistent (because
                # things are completing at the same time they're
                # being re-tried) then the `completed` field should
                # prevent us from re-sending a write in this case.
                if not pending_req.completed:
                    self._retry_send(check_time, pending_req, retried_writes)
        # Pending writes can be marked completed while this function
        # is running, so a key error means it was marked completed
        # after we sent a retry (therefore _try_remove_keys ignores
        # key errors).
        self._try_remove_keys(self._expire_map[check_time], *retried_writes)
    self._last_check_time = time_now
def test_time_ge():
    """Exercise Time.__ge__ around equality and small millisecond offsets."""
    lhs = Time(1, 500)
    rhs = Time(1, 500)
    # Equal values satisfy >=.
    assert lhs >= rhs
    lhs += Time(0, 1)
    # Strictly greater still satisfies >=.
    assert lhs >= rhs
    rhs += Time(0, 2)
    # Now rhs leads by 1 ms, so >= must fail.
    assert not (lhs >= rhs)
def test_time_le():
    """Exercise Time.__le__ around equality and small millisecond offsets."""
    smaller = Time(1, 500)
    bigger = Time(1, 500)
    # Equal values satisfy <=.
    assert smaller <= bigger
    bigger += Time(0, 1)
    # Strictly smaller still satisfies <=.
    assert smaller <= bigger
    smaller += Time(0, 2)
    # Now `smaller` actually leads by 1 ms, so <= must fail.
    assert not (smaller <= bigger)
def _ensure_config_req_sent(self):
    """Send the initial FileIO config request exactly once.

    A non-None retry time marks the request as already sent; otherwise
    arm the retry/timeout deadlines, pick a fresh sequence number, and
    emit the request on the link.
    """
    already_sent = self._config_retry_time is not None
    if already_sent:
        return
    current = Time.now()
    self._config_retry_time = current + Time(0, CONFIG_REQ_RETRY_MS)
    self._config_timeout = current + Time(0, CONFIG_REQ_TIMEOUT_MS)
    # Random 32-bit sequence number identifies the matching response.
    self._config_seq = random.randint(0, 0xffffffff)
    self._config_msg = None
    self._link(MsgFileioConfigReq(sequence=self._config_seq))
def test_time():
    """Sanity-check two Time.now() samples taken after a short delay."""
    import time
    time.sleep(1)
    first = Time.now()
    second = Time.now()

    def plausible(t):
        # A sample is sane when seconds are nonzero, or seconds are
        # zero but milliseconds are not (equivalent to the original
        # per-sample condition by boolean algebra).
        return t._seconds != 0 or t._millis != 0

    assert plausible(first) and plausible(second)
    # Could land on 0 milliseconds once, but probably never twice.
    assert first._millis != 0 or second._millis != 0
def test_iter():
    """Time.iter_since yields each ms step after start, through end inclusive."""
    start = Time(1, 500)
    end = Time(1, 505)
    expected = [Time(1, 501), Time(1, 502), Time(1, 503),
                Time(1, 504), Time(1, 505)]
    produced = list(Time.iter_since(start, end))
    # Checks both the values and the count in one comparison.
    assert produced == expected
def _retry_send(self, check_time, pending_req, delete_keys):
    """Re-send a pending request and reschedule its expiration.

    Bumps the retry/send counters, stamps the request with a fresh send
    time and deadline, files it under the new expiration bucket,
    re-emits it on the link, and queues it (via `delete_keys`) for
    removal from the old `check_time` bucket by the caller.
    """
    self._total_retries += 1
    self._total_sends += 1
    now = Time.now()
    deadline = now + Time(SBP_FILEIO_TIMEOUT)
    pending_req.record_retry(now, deadline)
    # Each bucket is a dict used as an ordered set keyed by the request.
    self._expire_map[deadline][pending_req] = pending_req
    self._link(pending_req.message)
    delete_keys.append(pending_req)
def _config_received(self):
    """Return True once a FileIO config response is available.

    Ensures the request was sent, re-sends it whenever the retry
    deadline passes, and after the overall timeout synthesizes a
    conservative default response so the transfer can proceed against
    peers that never answer.
    """
    self._ensure_config_req_sent()
    if self._config_msg is not None:
        return True
    current = Time.now()
    if current >= self._config_retry_time:
        self._link(MsgFileioConfigReq(sequence=self._config_seq))
        self._config_retry_time = current + Time(0, CONFIG_REQ_RETRY_MS)
    if current >= self._config_timeout:
        # Give up waiting and fall back to minimal, safe defaults.
        self._config_msg = MsgFileioConfigResp(sequence=0,
                                               window_size=100,
                                               batch_size=1,
                                               fileio_version=0)
    return self._config_msg is not None
def __init__(self, link, msg_type, cb=None):
    """Set up bookkeeping for sending/retrying FileIO messages.

    Args
    ---
    link : Handler
      Link over which messages will be sent.
    msg_type :
      The type of message being sent
    cb :
      Invoked when SBP message with type `msg_type` is received
    """
    self._link = link
    self._msg_type = msg_type
    self._callback = cb
    # NOTE(review): presumably maps sequence numbers to requests —
    # confirm against the code that populates it.
    self._seqmap = {}
    # Messages queued until a full batch is ready (see _send).
    self._batch_msgs = []
    self._last_check_time = Time.now()
    # Buckets of pending requests keyed by their expiration Time;
    # each bucket is a dict used as an ordered set.
    self._expire_map = defaultdict(dict)
    self._init_fileio_config(SBP_FILEIO_WINDOW_SIZE, SBP_FILEIO_BATCH_SIZE, PROGRESS_CB_REDUCTION_FACTOR)
    self._callback_thread = None
    self._link_thread = None
    # Starts at 1.0 (float) so retry-percentage math divides by a
    # nonzero float (see mk_progress_cb's percent_retried).
    self._total_sends = 1.0
    self._total_retries = 0
    # None until the first config request is sent
    # (see _ensure_config_req_sent).
    self._config_retry_time = None
def _send(self, msg, batch_size):
    """Queue a message and flush the batch once it reaches `batch_size`.

    Parameters
    ----------
    msg : MsgFileioReadReq, MsgFileioReadDirReq, MsgFileioWriteReq, MsgFileioRemove
      The message to be sent via the current link; passing None only
      re-checks whether a flush is due.
    batch_size : int
      The number of messages to batch together before actually sending.
    """
    if msg is not None:
        self._batch_msgs.append(msg)
    if len(self._batch_msgs) < batch_size:
        return
    # Block until the send window has room for the whole batch.
    self._wait_window_available(batch_size)
    now = Time.now()
    deadline = now + Time(SBP_FILEIO_TIMEOUT)
    for queued in self._batch_msgs:
        self._record_pending_req(queued, now, deadline)
    self._link(*self._batch_msgs)
    self._total_sends += len(self._batch_msgs)
    del self._batch_msgs[:]
def mk_progress_cb(file_length):
    """Build a progress callback that prints transfer status to stdout.

    The returned callback takes (offset, repeater) and writes a
    carriage-return-prefixed status line with percent done, MB
    confirmed, a rolling-average transfer speed, and retry percentage.

    State shared with the closures is held in one-element lists so the
    inner functions can rebind it (pre-`nonlocal` idiom).
    """
    time_last = [Time.now()]
    offset_last = [0]
    b_to_mb = 1024 * 1024.0
    file_mb = file_length / b_to_mb
    rolling_avg_len = 20
    rolling_avg_pts = []
    previous_avg = [None]

    def compute_rolling_average(speed_kbs):
        # Maintain a 20-point rolling average incrementally: once the
        # window is full, adjust the previous average by the leaving
        # and entering points instead of re-summing.
        removed_pt = None
        if len(rolling_avg_pts) >= rolling_avg_len:
            removed_pt = rolling_avg_pts.pop(0)
        rolling_avg_pts.append(speed_kbs)
        if removed_pt is not None:
            assert previous_avg[0] is not None
            new_avg_contrib = speed_kbs / rolling_avg_len
            removed_avg_contrib = removed_pt / rolling_avg_len
            previous_avg[0] -= removed_avg_contrib
            previous_avg[0] += new_avg_contrib
            return previous_avg[0]
        else:
            # Window not yet full: plain average over what we have.
            previous_avg[0] = sum(rolling_avg_pts) / len(rolling_avg_pts)
            return previous_avg[0]

    def the_callback(offset, repeater):
        time_current = Time.now()
        offset_delta = offset - offset_last[0]
        time_delta = time_current - time_last[0]
        percent_done = 100 * (offset / float(file_length))
        mb_confirmed = offset / b_to_mb
        # NOTE(review): raises ZeroDivisionError if two callbacks land
        # on the same time tick — confirm callers throttle invocations.
        speed_kbs = offset_delta / time_delta.to_float() / 1024
        fmt_str = "\r[{:02.02f}% ({:.02f}/{:.02f} MB) at {:.02f} kB/s ({:0.02f}% retried)]"
        percent_retried = 100 * (repeater.total_retries / repeater.total_sends)
        # NOTE(review): fmt_str has 5 placeholders but 7 arguments are
        # supplied; str.format silently ignores the extra two.
        status_str = fmt_str.format(percent_done, mb_confirmed, file_mb, compute_rolling_average(speed_kbs), percent_retried, repeater.total_retries, repeater.total_sends)
        sys.stdout.write(status_str)
        sys.stdout.flush()
        time_last[0] = time_current
        offset_last[0] = offset

    return the_callback
def test_time_sub():
    """Check Time subtraction, including negative (un-normalized) results."""
    small = Time(1, 400)
    big = Time(2, 500)
    diff = big - small
    # A positive subtraction result makes sense ...
    assert diff == Time(1, 100)
    # ... and adding the subtrahend back recovers the minuend.
    assert (diff + small) == big
    # A negative result is stored un-normalized.
    neg = small - big
    assert neg == Time(-2, 900)
    # TODO: the util could normalize neg to (-1, -100); at the very
    # least the float representations must mirror each other.
    assert diff.to_float() == -neg.to_float()
    # Adding big back cancels out to small again.
    assert (neg + big) == small
    # Identity property: x + (-x) == 0.
    assert diff + neg == Time(0, 0)
    # Adding a negative Time is equivalent to subtracting the positive.
    minus = Time(-1, -100)
    plus = Time(1, 100)
    assert (big + minus) == Time(1, 400)
    assert (big - plus) == Time(1, 400)
def the_callback(offset, repeater):
    """Print a one-line transfer-progress status to stdout.

    NOTE(review): this appears to be a duplicate of the inner callback
    built by mk_progress_cb; as written here it references names
    (`offset_last`, `time_last`, `file_length`, `b_to_mb`, `file_mb`,
    `compute_rolling_average`) that must exist in an enclosing scope —
    confirm this block is not dead code.
    """
    time_current = Time.now()
    offset_delta = offset - offset_last[0]
    time_delta = time_current - time_last[0]
    percent_done = 100 * (offset / float(file_length))
    mb_confirmed = offset / b_to_mb
    # NOTE(review): divides by the elapsed time; zero delta would raise.
    speed_kbs = offset_delta / time_delta.to_float() / 1024
    rolling_avg = compute_rolling_average(speed_kbs)
    fmt_str = "\r[{:02.02f}% ({:.02f}/{:.02f} MB) at {:.02f} kB/s ({:0.02f}% retried)]"
    percent_retried = 100 * (repeater.total_retries / repeater.total_sends)
    # NOTE(review): 7 arguments for 5 placeholders; extras are ignored.
    status_str = fmt_str.format(percent_done, mb_confirmed, file_mb, rolling_avg, percent_retried, repeater.total_retries, repeater.total_sends)
    sys.stdout.write(status_str)
    sys.stdout.flush()
    # Rebind shared closure state for the next invocation.
    time_last[0] = time_current
    offset_last[0] = offset
def test_time_eq():
    """Equality compares the (seconds, millis) pair."""
    one_sec = Time(1, 0)
    two_sec = Time(2, 0)
    also_one = Time(1, 0)
    # Different seconds compare unequal.
    assert one_sec != two_sec
    # Identical fields compare equal.
    assert one_sec == also_one
def test_time_lt():
    """Exercise Time.__lt__ around equality and a 1 ms offset."""
    t1 = Time(1, 500)
    t2 = Time(1, 500)
    # BUGFIX: the original asserted `not (t1 > t2)`, exercising __gt__
    # in the __lt__ test (copy-paste from test_time_gt); the equal case
    # of `<` was never checked.
    assert not (t1 < t2)
    t1 += Time(0, 1)
    assert (t2 < t1)
def test_time_add():
    """Millisecond carry: 500 ms + 500 ms rolls over into a full second."""
    lhs, rhs = Time(1, 500), Time(1, 500)
    total = lhs + rhs
    assert total == Time(3, 0)
def test_time_gt():
    """Exercise Time.__gt__ around equality and a 1 ms offset."""
    ahead = Time(1, 500)
    behind = Time(1, 500)
    # Equal values are not strictly greater.
    assert not (ahead > behind)
    ahead += Time(0, 1)
    assert ahead > behind
def test_time_repr():
    """repr() spells out the seconds and milliseconds fields."""
    sample = Time(1, 0)
    expected = "Time(s=1,ms=0)"
    assert repr(sample) == expected
def __init__(self, index):
    # NOTE(review): presumably the request's slot within the send
    # window/sequence — confirm against callers.
    self.index = index
    # None until the request completes; _check_pending consults this
    # flag before re-sending.
    self.completed = None
    # Send timestamp; combined with the FileIO timeout to compute the
    # expiration deadline.
    self.time = Time()
def test_time_to_float():
    """to_float() converts (seconds, millis) into fractional seconds."""
    sample = Time(1, 400)
    # BUGFIX: the original line was a bare expression with no `assert`,
    # so the test could never fail; it also lacked abs() for a proper
    # two-sided tolerance check.
    assert abs(sample.to_float() - 1.4) <= 0.00001