def collect_diffs(self, start_id, max_size=1024):
    """ Create a MergeInputReader and download diffs starting with
        sequence id `start_id` into it. `max_size` restricts the number
        of diffs that are downloaded. The download stops as soon as
        either a diff cannot be downloaded or the unpacked data in
        memory exceeds `max_size` kB.

        If some data was downloaded, returns a namedtuple with three
        fields: `id` contains the sequence id of the last downloaded
        diff, `reader` contains the MergeInputReader with the data and
        `newest` is a sequence id of the most recent diff available.

        Returns None if there was an error during download or no new
        data was available.
    """
    left_size = max_size * 1024
    current_id = start_id

    # must not read data newer than the published sequence id
    # or we might end up reading partial data
    newest = self.get_state_info()

    if newest is None or current_id > newest.sequence:
        return None

    rd = MergeInputReader()

    while left_size > 0 and current_id <= newest.sequence:
        try:
            diffdata = self.get_diff_block(current_id)
        except Exception:
            # Download is best-effort: treat any failure like
            # "no more data" instead of propagating. Avoid a bare
            # `except:` so KeyboardInterrupt/SystemExit still escape.
            LOG.debug("Error during diff download. Bailing out.")
            diffdata = ''

        if not diffdata:
            # Nothing could be fetched. If not even the first diff
            # arrived, report complete failure; otherwise return
            # what we have so far.
            if start_id == current_id:
                return None
            break

        left_size -= rd.add_buffer(diffdata, self.diff_type)
        # Lazy %-args: formatting only happens if DEBUG is enabled.
        LOG.debug("Downloaded change %d. (%d kB available in download buffer)",
                  current_id, left_size / 1024)
        current_id += 1

    return DownloadResult(current_id - 1, rd, newest.sequence)
def collect_diffs(self, start_id, max_size=1024):
    """ Create a MergeInputReader and download diffs starting with
        sequence id `start_id` into it. `max_size` restricts the number
        of diffs that are downloaded. The download stops as soon as
        either a diff cannot be downloaded or the unpacked data in
        memory exceeds `max_size` kB.

        If some data was downloaded, returns a namedtuple with three
        fields: `id` contains the sequence id of the last downloaded
        diff, `reader` contains the MergeInputReader with the data and
        `newest` is a sequence id of the most recent diff available.

        Returns None if there was an error during download or no new
        data was available.
    """
    left_size = max_size * 1024
    current_id = start_id

    # must not read data newer than the published sequence id
    # or we might end up reading partial data
    newest = self.get_state_info()

    if newest is None or current_id > newest.sequence:
        return None

    rd = MergeInputReader()

    while left_size > 0 and current_id <= newest.sequence:
        try:
            diffdata = self.get_diff_block(current_id)
        except Exception:
            # Best-effort download: log instead of silently swallowing,
            # and avoid a bare `except:` so KeyboardInterrupt/SystemExit
            # still escape.
            log.debug("Error during diff download. Bailing out.")
            diffdata = ''

        if not diffdata:
            # No data at all for the very first diff means the whole
            # download failed; otherwise keep what was collected.
            if start_id == current_id:
                return None
            break

        left_size -= rd.add_buffer(diffdata, self.diff_type)
        # Pass format args lazily instead of eager %-formatting, so the
        # string is only built when DEBUG logging is enabled.
        log.debug("Downloaded change %d. (%d kB available in download buffer)",
                  current_id, left_size / 1024)
        current_id += 1

    return DownloadResult(current_id - 1, rd, newest.sequence)
def apply_diffs(self, handler, start_id, max_size=1024, simplify=True):
    """ Download diffs starting with sequence id `start_id`, merge them
        together and then apply them to handler `handler`. `max_size`
        restricts the number of diffs that are downloaded. The download
        stops as soon as either a diff cannot be downloaded or the
        unpacked data in memory exceeds `max_size` kB.

        The function returns the sequence id of the last diff that was
        downloaded or None if the download failed completely.
    """
    left_size = max_size * 1024
    current_id = start_id

    # must not read data newer than the published sequence id
    # or we might end up reading partial data
    try:
        newest = self.get_state_info()
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still escape; any failure to read the state means we cannot
        # safely download anything.
        return None

    # get_state_info may also report failure by returning None; without
    # this guard the attribute access below would raise AttributeError.
    if newest is None or current_id > newest.sequence:
        return None

    rd = MergeInputReader()

    while left_size > 0 and current_id <= newest.sequence:
        try:
            diffdata = self.get_diff_block(current_id)
        except Exception:
            # Best-effort download: a failed block simply ends the loop.
            diffdata = ''

        if not diffdata:
            # Nothing even for the first diff -> complete failure.
            if start_id == current_id:
                return None
            break

        left_size -= rd.add_buffer(diffdata, self.diff_type)
        current_id += 1

    rd.apply(handler, simplify)

    return current_id - 1