def _revert(self, job, begin):
    """
    Process a REVERT job.
    """
    self.logger.increment(
        'partition.delete.count.%s' % (job['local_dev']['device'],))
    # we'd desperately like to push this partition back to its
    # primary location, but if that node is down, the next best thing
    # is one of the handoff locations - which *might* be us already!
    dest_nodes = itertools.chain(
        job['sync_to'],
        job['policy'].object_ring.get_more_nodes(job['partition']),
    )
    syncd_with = 0
    reverted_objs = {}
    for node in dest_nodes:
        if syncd_with >= len(job['sync_to']):
            break
        if node['id'] == job['local_dev']['id']:
            # this is as good a place as any for this data for now
            break
        success, in_sync_objs = ssync_sender(
            self, node, job, job['suffixes'])()
        self.rehash_remote(node, job, job['suffixes'])
        if success:
            syncd_with += 1
            reverted_objs.update(in_sync_objs)
    if syncd_with >= len(job['sync_to']):
        self.delete_reverted_objs(
            job, reverted_objs, job['frag_index'])
    self.logger.timing_since('partition.delete.timing', begin)
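# A minimal sketch of the destination-selection pattern above: preferred
# primaries first, then ring handoffs, stopping once the chain wraps back
# around to ourselves. FakeRing, local_dev, and the node ids here are
# hypothetical stand-ins, not part of the real module.
import itertools

class FakeRing(object):
    def get_more_nodes(self, partition):
        # stub: yield extra handoff nodes; id 3 is "us" below
        for node_id in (7, 8, 3):
            yield {'id': node_id}

local_dev = {'id': 3}
sync_to = [{'id': 1}, {'id': 2}]
dest_nodes = itertools.chain(sync_to, FakeRing().get_more_nodes(0))

chosen = []
for node in dest_nodes:
    if len(chosen) >= len(sync_to):
        break  # reverted to as many nodes as we have preferred primaries
    if node['id'] == local_dev['id']:
        break  # the handoff chain reached us; keep the data here for now
    chosen.append(node['id'])

print(chosen)  # [1, 2] - both primaries taken, handoffs never consulted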
def _sync(self, job, begin):
    """
    Process a SYNC job.
    """
    self.logger.increment(
        'partition.update.count.%s' % (job['local_dev']['device'],))
    # after our left and right partners, if there's some sort of
    # failure we'll continue onto the remaining primary nodes and
    # make sure they're in sync - or potentially rebuild missing
    # fragments we find
    dest_nodes = itertools.chain(
        job['sync_to'],
        # I think we could order these based on our index to better
        # protect against a broken chain
        [
            n for n in
            job['policy'].object_ring.get_part_nodes(job['partition'])
            if n['id'] != job['local_dev']['id'] and
            n['id'] not in (m['id'] for m in job['sync_to'])
        ],
    )
    syncd_with = 0
    for node in dest_nodes:
        if syncd_with >= len(job['sync_to']):
            # success!
            break
        try:
            suffixes = self._get_suffixes_to_sync(job, node)
        except SuffixSyncError:
            continue
        if not suffixes:
            syncd_with += 1
            continue
        # ssync any out-of-sync suffixes with the remote node
        success, _ = ssync_sender(
            self, node, job, suffixes)()
        # let remote end know to rehash its suffixes
        self.rehash_remote(node, job, suffixes)
        # update stats for this attempt
        self.suffix_sync += len(suffixes)
        self.logger.update_stats('suffix.syncs', len(suffixes))
        if success:
            syncd_with += 1
    self.logger.timing_since('partition.update.timing', begin)
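# The fallback list in _sync filters the partition's full primary set down
# to nodes that are neither ourselves nor already in sync_to, so a failed
# partner can be replaced by another primary. A standalone sketch of that
# filter; the node dicts here are hypothetical.
part_nodes = [{'id': i} for i in range(6)]  # all primaries for the partition
local_dev = {'id': 2}                       # this node
sync_to = [{'id': 1}, {'id': 3}]            # left and right partners

fallback = [
    n for n in part_nodes
    if n['id'] != local_dev['id'] and
    n['id'] not in (m['id'] for m in sync_to)
]
print([n['id'] for n in fallback])  # [0, 4, 5]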
def _revert(self, job, begin):
    """
    Process a REVERT job.
    """
    self.logger.increment(
        'partition.delete.count.%s' % (job['local_dev']['device'],))
    syncd_with = 0
    reverted_objs = {}
    for node in job['sync_to']:
        success, in_sync_objs = ssync_sender(
            self, node, job, job['suffixes'])()
        self.rehash_remote(node, job, job['suffixes'])
        if success:
            syncd_with += 1
            reverted_objs.update(in_sync_objs)
    if syncd_with >= len(job['sync_to']):
        self.delete_reverted_objs(
            job, reverted_objs, job['frag_index'])
    else:
        self.handoffs_remaining += 1
    self.logger.timing_since('partition.delete.timing', begin)
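# This later variant of _revert drops the handoff-chain fallback: it only
# pushes to the intended primaries and, if any of them fail, keeps the
# local data and counts the partition in handoffs_remaining for a later
# pass. A toy sketch of that accounting; the results list is hypothetical.
results = [(True, {'a/suffix': 'hash'}), (False, {})]  # one success, one failure
syncd_with = sum(1 for success, _ in results if success)
handoffs_remaining = 0

if syncd_with >= len(results):
    print('all destinations in sync - safe to delete reverted objects')
else:
    handoffs_remaining += 1  # revisit this handoff partition next cycle
    print('handoffs_remaining =', handoffs_remaining)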