def onread(self, results, error):
    """Completion callback for an async read of self.key.

    On error: retries on timeout (code 110) while attempts remain, falls
    back to replicas in other groups, or gives up and stops recovery of
    this key.  On success: updates read stats, captures metadata from the
    freshest result and dispatches to the index-shard / plain-copy write
    paths via self.write().
    """
    self.read_result = None
    try:
        if error.code:
            log.error("Failed to read key: {0} from groups: {1}: {2}"
                      .format(self.key, self.same_groups, error))
            self.stats.read_failed += 1
            if error.code == 110 and self.attempt < self.ctx.attempts:
                # Timed out (110 == ETIMEDOUT): charge one attempt per
                # group that was queried, then retry the read.
                self.attempt += len(self.read_session.groups)
                log.debug("Read has been timed out. Try to reread key: {0} from groups: {1}, attempt: {2}/{3}"
                          .format(self.key, self.same_groups, self.attempt,
                                  self.ctx.attempts))
                # Bug fix: the reread was logged but never actually issued,
                # leaving the key's recovery stalled on every timeout.
                self.read()
            elif len(self.key_infos) > 1:
                # Replicas exist in other groups - restart recovery there.
                self.stats.read_failed += len(self.read_session.groups)
                self.diff_groups += self.read_session.groups
                self.run()
            else:
                # Bug fix: .format(self.key) was missing, so the literal
                # "{0}" was written to the log instead of the key.
                log.error("Failed to read key: {0} from any available group. This key couldn't be recovered now."
                          .format(self.key))
                self.stop(False)
            return

        self.stats.read += 1
        self.stats.read_bytes += results[-1].size
        if self.recovered_size == 0:
            # First chunk: capture metadata of the freshest replica.
            self.write_session.user_flags = results[-1].user_flags
            # NOTE(review): later revisions of this handler assign this to
            # self.write_session.timestamp - confirm which attribute the
            # write path actually consumes in this revision.
            self.timestamp = results[-1].timestamp
        self.attempt = 0

        if self.chunked and len(results) > 1:
            # Remember groups whose partial reads failed so they get
            # re-synced later.
            self.missed_groups += [r.group_id for r in results if r.error.code]

        if validate_index(results[-1]) and self.diff_groups:
            # The key holds a secondary-index shard that differs between
            # groups - it must be merged rather than plainly copied.
            self.index_shard = True
            log.debug("Index has been found in key: {0}".format(repr(self.key)))
        else:
            log.debug("Regular object has been found in key: {0}. Copy it from groups: {1} to groups: {2}"
                      .format(repr(self.key), self.same_groups,
                              self.write_session.groups))
            self.index_shard = False
            self.write_data = results[-1].data
        self.write()
    except Exception as e:
        log.error("Failed to handle origin key: {0}, exception: {1}, traceback: {2}"
                  .format(self.key, repr(e), traceback.format_exc()))
        self.stop(False)
def onread(self, results, error):
    """Completion callback for an async read of self.key.

    Failure path: retries on timeout (code 110) while attempts remain,
    falls back to replicas from other groups, or gives up on the key.
    Success path: updates read stats, captures metadata from the newest
    result, then hands off to self.write().
    """
    try:
        if error.code:
            log.error("Failed to read key: {0} from groups: {1}: {2}".format(
                self.key, self.same_groups, error))
            self.stats.read_failed += 1
            timed_out = error.code == 110
            if timed_out and self.attempt < self.ctx.attempts:
                # Charge one attempt per queried group, then retry.
                self.attempt += len(self.read_session.groups)
                log.debug(
                    "Read has been timed out. Try to reread key: {0} from groups: {1}, attempt: {2}/{3}".format(
                        self.key, self.same_groups, self.attempt,
                        self.ctx.attempts))
                self.read()
            elif len(self.key_infos) > 1:
                # Other groups still hold replicas - recover from them.
                self.stats.read_failed += len(self.read_session.groups)
                self.diff_groups += self.read_session.groups
                self.run()
            else:
                log.error(
                    "Failed to read key: {0} from any available group. This key couldn't be recovered now.".format(
                        self.key))
                self.stop(False)
            return

        newest = results[-1]
        self.stats.read += 1
        self.stats.read_bytes += newest.size
        if self.recovered_size == 0:
            # First chunk: propagate metadata of the freshest replica.
            self.write_session.user_flags = newest.user_flags
            self.write_session.timestamp = newest.timestamp
        if self.total_size != newest.total_size:
            self.total_size = newest.total_size
            self.chunked = self.total_size > self.ctx.chunk_size
        self.attempt = 0
        if self.chunked and len(results) > 1:
            # Track groups whose chunk reads failed for later re-sync.
            failed_groups = [r.group_id for r in results if r.error.code]
            self.missed_groups += failed_groups
        if validate_index(newest) and self.diff_groups:
            self.index_shard = True
            log.debug("Index has been found in key: {0}".format(repr(self.key)))
        else:
            log.debug(
                "Regular object has been found in key: {0}. Copy it from groups: {1} to groups: {2}".format(
                    repr(self.key), self.same_groups,
                    self.write_session.groups))
            self.index_shard = False
            self.write_data = newest.data
        self.write()
    except Exception as e:
        log.error("Failed to handle origin key: {0}, exception: {1}, traceback: {2}".format(
            self.key, repr(e), traceback.format_exc()))
        self.stop(False)
def onread(self, results, error):
    """Completion callback for an async read of self.key.

    On error: doubles the read timeout and retries on ETIMEDOUT while
    attempts remain, falls back to replicas in other groups, or skips the
    key.  On success: updates stats, pins the freshest replica's metadata
    on the sessions and dispatches to self.write().
    """
    try:
        if error.code:
            log.error("Failed to read key: {0} from groups: {1}: {2}"
                      .format(self.key, self.same_groups, error))
            self.stats.read_failed += len(results)
            # NOTE(review): a sibling revision of this handler compares
            # against -errno.ETIMEDOUT (elliptics reports negative error
            # codes) - confirm the sign used by this client version.
            if error.code == errno.ETIMEDOUT:
                if self.attempt < self.ctx.attempts:
                    # Exponential backoff: double the timeout per retry.
                    self.attempt += 1
                    old_timeout = self.read_session.timeout
                    self.read_session.timeout *= 2
                    log.error("Read has been timed out. Try to reread key: {0} from groups: {1}, attempt: {2}/{3} "
                              "with increased timeout: {4}/{5}"
                              .format(self.key, self.same_groups,
                                      self.attempt, self.ctx.attempts,
                                      self.read_session.timeout, old_timeout))
                    self.read()
                else:
                    # Bug fix: the message reused {1} for both the attempts
                    # limit and the key ("all <key> attemps are used") and
                    # never printed ctx.attempts; it now has three distinct
                    # placeholders and corrected spelling/grammar.
                    log.error("Read has been timed out {0} times, all {1} attempts are used. "
                              "The key: {2} can't be recovered now. Skip it"
                              .format(self.attempt, self.ctx.attempts,
                                      self.key))
                    self.stats.skipped += 1
                    self.stop(False)
            elif len(self.key_infos) > 1:
                log.error("Key: {0} has available replicas in other groups. Try to recover the key from them"
                          .format(self.key))
                self.diff_groups += self.read_session.groups
                self.run()
            else:
                log.error("Failed to read key: {0} from any available group. "
                          "This key can't be recovered now. Skip it"
                          .format(self.key))
                self.stats.skipped += 1
                self.stop(False)
            return

        # Everything except the freshest result counts as a failed read.
        self.stats.read_failed += len(results) - 1
        self.stats.read += 1
        self.stats.read_bytes += results[-1].size
        if self.recovered_size == 0:
            # First chunk: pin metadata and the source group so subsequent
            # chunk reads come from the same replica.
            self.write_session.user_flags = results[-1].user_flags
            self.write_session.timestamp = results[-1].timestamp
            # Skip checksum verification on the follow-up chunk reads.
            self.read_session.ioflags |= elliptics.io_flags.nocsum
            self.read_session.groups = [results[-1].group_id]
            self.key_flags = results[-1].record_flags
        if self.total_size != results[-1].total_size:
            self.total_size = results[-1].total_size
            self.chunked = self.total_size > self.ctx.chunk_size
        self.attempt = 0
        if self.chunked and len(results) > 1:
            # Remember groups whose chunk reads failed so they get re-synced.
            self.missed_groups += [r.group_id for r in results if r.error.code]
        if validate_index(results[-1]) and self.diff_groups:
            # Secondary-index shard differing between groups: merge path.
            self.index_shard = True
            log.debug("Index has been found in key: {0}".format(repr(self.key)))
        else:
            log.debug("Regular object has been found in key: {0}. Copy it from groups: {1} to groups: {2}"
                      .format(repr(self.key), self.same_groups,
                              self.write_session.groups))
            self.index_shard = False
            self.write_data = results[-1].data
        self.write()
    except Exception as e:
        log.error("Failed to handle origin key: {0}, exception: {1}, traceback: {2}"
                  .format(self.key, repr(e), traceback.format_exc()))
        self.stop(False)
def onread(self, results, error):
    """Completion callback for an async read of self.key.

    First schedules removal of replicas that returned EILSEQ (corrupted
    data).  On error: doubles the read timeout and retries on ETIMEDOUT
    while attempts remain, falls back to replicas in other groups, or
    skips the key.  On success: updates stats, pins the freshest
    replica's record_info metadata on the sessions and dispatches to
    self.write().
    """
    try:
        # Replicas that failed the checksum (EILSEQ) hold corrupted data
        # and are removed asynchronously before recovery proceeds.
        corrupted_groups = [r.group_id for r in results
                            if r.status == -errno.EILSEQ]
        if corrupted_groups:
            with self.pending_operations_lock:
                self.pending_operations += 1
            KeyRemover(self.key, self.remove_session, corrupted_groups,
                       self.ctx, self.stats, self.on_complete).remove()
        if error.code:
            log.error("Failed to read key: {0} from groups: {1}: {2}"
                      .format(self.key, self.same_groups, error))
            self.stats_cmd.counter('read.{0}'.format(error.code), 1)
            self.stats.read_failed += len(results)
            if error.code == -errno.ETIMEDOUT:
                if self.attempt < self.ctx.attempts:
                    # Exponential backoff: double the timeout per retry.
                    self.attempt += 1
                    old_timeout = self.read_session.timeout
                    self.read_session.timeout *= 2
                    log.error("Read has been timed out. Try to reread key: {0} from groups: {1}, attempt: {2}/{3} "
                              "with increased timeout: {4}/{5}"
                              .format(self.key, self.same_groups,
                                      self.attempt, self.ctx.attempts,
                                      self.read_session.timeout, old_timeout))
                    self.read()
                else:
                    # Bug fix: the message reused {1} for both the attempts
                    # limit and the key ("all <key> attempts are used") and
                    # never printed ctx.attempts; it now has three distinct
                    # placeholders and corrected grammar.
                    log.error("Read has been timed out {0} times, all {1} attempts are used. "
                              "The key: {2} can't be recovered now. Skip it"
                              .format(self.attempt, self.ctx.attempts,
                                      self.key))
                    self.stats.skipped += 1
                    self.stop(False)
            elif len(self.key_infos) > 1:
                log.error("Key: {0} has available replicas in other groups. Try to recover the key from them"
                          .format(self.key))
                self.diff_groups += self.read_session.groups
                self.run()
            else:
                log.error("Failed to read key: {0} from any available group. "
                          "This key can't be recovered now. Skip it"
                          .format(self.key))
                self.stats.skipped += 1
                self.stop(False)
            return

        # Everything except the freshest result counts as a failed read.
        self.stats.read_failed += len(results) - 1
        self.stats.read += 1
        self.stats.read_bytes += results[-1].size
        if self.recovered_size == 0:
            # First chunk: pin metadata and the source group so subsequent
            # chunk reads come from the same replica.
            self.write_session.user_flags = results[-1].record_info.user_flags
            self.write_session.timestamp = results[-1].record_info.data_timestamp
            self.write_session.json_timestamp = results[-1].record_info.json_timestamp
            # Skip checksum verification on the follow-up chunk reads.
            self.read_session.ioflags |= elliptics.io_flags.nocsum
            self.read_session.groups = [results[-1].group_id]
            self.key_flags = results[-1].record_info.record_flags
            self.json_capacity = results[-1].record_info.json_capacity
        if self.total_size != results[-1].record_info.data_size:
            self.total_size = results[-1].record_info.data_size
            self.chunked = self.total_size > self.ctx.chunk_size
        self.attempt = 0
        if self.chunked and len(results) > 1:
            # Remember groups whose chunk reads failed so they get re-synced.
            self.missed_groups += [r.group_id for r in results if r.error.code]
        if validate_index(results[-1]) and self.diff_groups:
            # Secondary-index shard differing between groups: merge path.
            self.index_shard = True
            log.debug("Index has been found in key: {0}".format(repr(self.key)))
        else:
            log.debug("Regular object has been found in key: {0}. Copy it from groups: {1} to groups: {2}"
                      .format(repr(self.key), self.same_groups,
                              self.write_session.groups))
            self.index_shard = False
            self.write_data = results[-1].data
            self.json = results[-1].json
        self.write()
    except Exception as e:
        log.error("Failed to handle origin key: {0}, exception: {1}, traceback: {2}"
                  .format(self.key, repr(e), traceback.format_exc()))
        self.stop(False)