def sync_entries(self, log_entries, retries):
    """Incrementally sync each bucket instance named in *log_entries*,
    plus any *retries* left over from an earlier pass.

    Returns the list of bucket instances whose sync failed this time,
    so the caller can retry them later.
    """
    try:
        instances = {entry["key"] for entry in log_entries}
    except KeyError:
        log.error("log containing bad key is: %s", log_entries)
        raise
    failed = []
    for instance in instances.union(retries):
        if ":" not in instance:
            # it's just a plain bucket from an old version of the agent
            instance = self.get_bucket_instance(instance)
        bound = client.get_worker_bound(self.dest_conn, "bucket-index", instance)
        marker = bound["marker"]
        timestamp = bound["oldest_time"]
        # remap dictionaries to object-like
        retries = [obj_.to_obj(item) for item in bound["retries"]]
        try:
            result = self.inc_sync_bucket_instance(instance, marker,
                                                   timestamp, retries)
        except Exception as e:
            log.warn('error syncing bucket instance "%s": %s',
                     instance, e, exc_info=True)
            result = RESULT_ERROR
        if result == RESULT_ERROR:
            failed.append(instance)
    return failed
def sync_entries(self, log_entries, retries):
    """Sync every bucket instance mentioned in *log_entries* together
    with previously failed *retries*.

    Missing worker bounds fall back to an empty marker, the default
    time, and no retries. Returns the instances to retry next pass.
    """
    try:
        instances = {entry['key'] for entry in log_entries}
    except KeyError:
        log.error('log containing bad key is: %s', log_entries)
        raise
    failed = []
    for instance in instances.union(retries):
        try:
            marker, timestamp, retries = client.get_worker_bound(
                self.dest_conn, 'bucket-index', instance)
        except client.NotFound:
            # no bound stored yet: start this instance from scratch
            log.debug('no worker bound found for bucket instance "%s"',
                      instance)
            marker, timestamp, retries = '', DEFAULT_TIME, []
        try:
            result = self.inc_sync_bucket_instance(instance, marker,
                                                   timestamp, retries)
        except Exception as e:
            log.warn('error syncing bucket instance "%s": %s',
                     instance, e, exc_info=True)
            result = RESULT_ERROR
        if result == RESULT_ERROR:
            failed.append(instance)
    return failed
def test_get_bound_fails_fallsback_to_defaults(self):
    # a 404 from the server should make the client return its defaults
    self.register(status=404)
    result = client.get_worker_bound(
        self.connection, 'bucket-index', 'beast:us-east'
    )
    expected = {
        'marker': " ",
        'retries': [],
        'oldest_time': DEFAULT_TIME,
    }
    for key, value in expected.items():
        assert result[key] == value
def test_get_bound_has_right_metadata(self):
    # with a successful response, all stored metadata comes through
    self.register()
    result = client.get_worker_bound(
        self.connection, 'bucket-index', 'beast:us-east'
    )
    assert result['marker'] == "00000000002.2.3"
    assert result['oldest_time'] == "0.000000"
    assert result['retries'] == {'hello'}
def get_worker_bound(self, shard_num):
    """Fetch the stored replication bound for *shard_num* and return
    its (marker, retries) pair."""
    bound = client.get_worker_bound(self.dest_conn, self.type, shard_num)
    marker, retries = bound['marker'], bound['retries']
    dev_log.debug('oldest marker and time for shard %d are: %r %r',
                  shard_num, marker, bound['oldest_time'])
    dev_log.debug('%d items to retry are: %r', len(retries), retries)
    return marker, retries
def get_worker_bound(self, shard_num):
    """Look up the stored replication bound for *shard_num*.

    Returns a ``(marker, retries)`` tuple. If no bound has been stored
    yet (``client.NotFound``), falls back to an empty marker and an
    empty retry list so the sync starts from the beginning.
    """
    try:
        marker, timestamp, retries = client.get_worker_bound(
            self.dest_conn, self.type, shard_num)
        log.debug('oldest marker and time for shard %d are: %r %r',
                  shard_num, marker, timestamp)
        # message typo fixed ('retrie' -> 'retry') to match the sibling
        # get_worker_bound implementations
        log.debug('%d items to retry are: %r', len(retries), retries)
    except client.NotFound:
        # if no worker bounds have been set, start from the beginning
        marker, retries = '', []
    return marker, retries
def get_worker_bound(self, shard_num):
    """Return the (marker, retries) pair recorded for the given shard."""
    bound = client.get_worker_bound(
        self.dest_conn, self.type, shard_num)
    marker = bound['marker']
    retries = bound['retries']
    oldest = bound['oldest_time']
    dev_log.debug('oldest marker and time for shard %d are: %r %r',
                  shard_num, marker, oldest)
    dev_log.debug('%d items to retry are: %r', len(retries), retries)
    return marker, retries
def sync_entries(self, log_entries, retries):
    """Sync bucket instances taken from *log_entries* and previous
    *retries*; return the list of instances that still need retrying."""
    try:
        bucket_instances = set(entry['key'] for entry in log_entries)
    except KeyError:
        log.error('log containing bad key is: %s', log_entries)
        raise
    new_retries = []
    for name in bucket_instances.union(retries):
        if ':' not in name:
            # it's just a plain bucket from an old version of the agent
            name = self.get_bucket_instance(name)
        bound = client.get_worker_bound(
            self.dest_conn, 'bucket-index', name)
        # remap dictionaries to object-like
        retries = [obj_.to_obj(i) for i in bound['retries']]
        try:
            sync_result = self.inc_sync_bucket_instance(
                name, bound['marker'], bound['oldest_time'], retries)
        except Exception as e:
            log.warn('error syncing bucket instance "%s": %s',
                     name, e, exc_info=True)
            sync_result = RESULT_ERROR
        if sync_result == RESULT_ERROR:
            new_retries.append(name)
    return new_retries