def _lookup_key(self, key_id, group_ids):
    """Look up a key on each of the given groups.

    Fires one lookup request per group (sessions use the ``no_exceptions``
    policy, so per-lookup failures are reported via ``lookup.error`` rather
    than raised) and collects the successful results.

    :param key_id: string key identifier, utf-8 encoded into an elliptics id
    :param group_ids: iterable of group ids to look the key up on

    :returns: dict mapping group id -> successful lookup result

    :raises ValueError: if every lookup returned -2 (-ENOENT), i.e. the key
        has already been removed from the couple
    :raises RuntimeError: if no lookup succeeded for any other reason
    """
    eid = elliptics.Id(key_id.encode('utf-8'))
    lookups = []
    for group_id in group_ids:
        s = self.session.clone()
        # errors are delivered through lookup.error.code instead of raising
        s.set_exceptions_policy(elliptics.exceptions_policy.no_exceptions)
        s.add_groups([group_id])
        lookups.append((s.lookup(eid), eid, group_id))

    lookup_by_group = {}
    # BUGFIX: the previous version declared ``global not_found_count`` inside
    # the callback, which referenced a nonexistent module-level name and
    # raised NameError on the first -2 lookup. A one-element list lets the
    # nested callback mutate the counter without ``global``/``nonlocal``.
    not_found_count = [0]

    def set_group_lookup(lookup, group_id, elapsed_time=None, end_time=None):
        # Callback for process_elliptics_async_result; records a successful
        # lookup or counts an expected "key not found" (-2) response.
        if lookup.error.code:
            if lookup.error.code == -2:
                logger.warn(
                    'Key {}: lookup returned -2, group {}/{}'.format(
                        key_id, lookup.group_id, group_id))
                not_found_count[0] += 1
                return
            else:
                raise lookup.error
        lookup_by_group[group_id] = lookup

    logger.debug('Key {}: performing lookups on groups {}'.format(
        key_id, group_ids))

    for result, eid, group_id in lookups:
        try:
            h.process_elliptics_async_result(
                result, set_group_lookup, group_id, raise_on_error=False)
        except Exception:
            logger.exception(
                'Failed to lookup key {0} on group {1}'.format(
                    eid, group_id))
            continue

    if not lookup_by_group:
        # Distinguish "key legitimately gone everywhere" from "all lookups
        # failed for unexpected reasons".
        if len(lookups) == not_found_count[0]:
            raise ValueError('key has already been removed from couple')
        else:
            raise RuntimeError('all lookups for key failed')

    return lookup_by_group
def monitor_top_stats(self):
    """Collect 'top keys' monitor statistics from every storage host and
    rebuild ``self.top_keys``.

    Runs periodically: the ``finally`` clause reschedules the next run on
    the task queue regardless of success or failure.
    """
    try:
        start_ts = time.time()
        logger.info('Monitor top stats update started')

        logger.info('Before calculating routes')
        addresses = set(
            route.address
            for route in self.session.routes.get_unique_routes()
        )
        logger.info('Unique routes calculated')

        # Fire one direct monitor-stat request per host address.
        pending = []
        for addr in addresses:
            node_session = self.session.clone()
            node_session.set_direct_id(addr)
            logger.debug('Request for top of node {0}'.format(addr))
            pending.append(
                (node_session.monitor_stat(addr, self.STAT_CATEGORIES),
                 addr))

        # Gather responses; a failure on one node must not abort the rest.
        collected = {}
        for async_result, addr in pending:
            try:
                h.process_elliptics_async_result(
                    async_result, self.update_top, collected)
            except Exception as e:
                logger.error(
                    'Failed to request monitor_stat for node {0}: '
                    '{1}\n{2}'.format(addr, e, traceback.format_exc()))

        # Swap in the freshly collected stats atomically, then act on them.
        self.top_keys = collected
        self._try_distribute_keys()
    except Exception as e:
        logger.error('Failed to update monitor top stats: {0}\n{1}'.format(
            e, traceback.format_exc()))
    finally:
        logger.info(
            'Monitor top stats update finished, time: {0:.3f}'.format(
                time.time() - start_ts))
        # Reschedule the next periodic run no matter what happened above.
        self.__tq.add_task_at(
            CacheManager.MONITOR_TOP_STATS,
            self.top_update_timer.next(),
            self.monitor_top_stats)
def _key_by_dc(self, key):
    """Look a cache key up on its data groups and map results by dc.

    :param key: dict describing the key; ``key['id']`` is the string key id
        and ``key['data_groups']`` lists the group ids holding the data

    :returns: dict mapping dc name -> ``{'group': group_id, 'size': size}``
        (may be empty if all lookups failed or the key was removed)
    """
    eid = elliptics.Id(key['id'].encode('utf-8'))
    lookups = []
    # add other cache groups as a source when key size is fixed
    for group_id in key['data_groups']:
        s = self.session.clone()
        # errors are delivered through lookup.error.code instead of raising
        s.set_exceptions_policy(elliptics.exceptions_policy.no_exceptions)
        s.add_groups([group_id])
        lookups.append((s.lookup(eid), eid, group_id))

    key_by_dc = {}
    # BUGFIX: the previous version declared ``global not_found_count`` inside
    # the callback, which referenced a nonexistent module-level name and
    # raised NameError on the first -2 lookup. A one-element list lets the
    # nested callback mutate the counter without ``global``/``nonlocal``.
    not_found_count = [0]

    def set_key_size_by_dc(lookup, group_id, elapsed_time=None, end_time=None):
        # Callback: records the key's size per dc, counts -2 (-ENOENT)
        # responses, and re-raises any other lookup error.
        if lookup.error.code:
            if lookup.error.code == -2:
                not_found_count[0] += 1
                return
            else:
                raise lookup.error
        # dc is resolved via the first node backend of the group
        dc = storage.groups[group_id].node_backends[0].node.host.dc
        key_by_dc[dc] = {
            'group': group_id,
            'size': lookup.size,
        }

    for result, eid, group_id in lookups:
        try:
            h.process_elliptics_async_result(
                result, set_key_size_by_dc, group_id, raise_on_error=False)
        except Exception as e:
            logger.error(
                'Failed to lookup key {0} on group {1}: {2}'.format(
                    eid, group_id, e))
            continue

    if len(lookups) == not_found_count[0]:
        logger.info('Key {0}: has already been removed from couple'.format(
            key['id']))

    return key_by_dc
def monitor_stats(self, groups=None):
    """Request monitor statistics from storage nodes and refresh statuses.

    When ``groups`` is given, only hosts backing those groups are polled
    (via direct addressing); otherwise every host from the route list is
    polled. Afterwards node backend, filesystem and group statuses are
    updated.
    """
    if groups:
        # Direct addressing: only the hosts backing the requested groups.
        endpoints = set(
            (nb.node.host.addr, nb.node.port, nb.node.family)
            for g in groups
            for nb in g.node_backends
        )
        host_addrs = [elliptics.Address(*endpoint) for endpoint in endpoints]
    else:
        logger.info('Before calculating routes')
        host_addrs = set(
            r.address for r in self.__session.routes.get_unique_routes())
        logger.info('Unique routes calculated')

    pending = []
    for addr in host_addrs:
        node_session = self.__session.clone()
        node_session.set_direct_id(addr)
        logger.debug('Request for monitor_stat of node {0}'.format(
            addr))
        pending.append(
            (node_session.monitor_stat(addr, self.MONITOR_STAT_CATEGORIES),
             addr))

    # Gather responses; a failure on one node must not abort the rest.
    for async_result, addr in pending:
        try:
            h.process_elliptics_async_result(async_result,
                                             self.update_statistics)
        except Exception as e:
            logger.error('Failed to request monitor_stat for node {0}: '
                         '{1}\n{2}'.format(addr, e, traceback.format_exc()))

    # NOTE: the 'and/or' chains are kept deliberately instead of ternaries:
    # if the requested groups yield an empty collection, the expression
    # falls back to the full storage-wide set.
    nbs = (groups and [nb for g in groups for nb in g.node_backends]
           or storage.node_backends.keys())
    for nb in nbs:
        nb.update_statistics_status()
        nb.update_status()

    fss = (groups and set(nb.fs for nb in nbs)
           or storage.fs.keys())
    for fs in fss:
        fs.update_status()

    for group in groups or storage.groups.keys():
        logger.info('Updating status for group {0}'.format(group.group_id))
        group.update_status()
def update_symm_groups_async(self, groups=None):
    """Read metakeys from storage groups and (re)build groupsets.

    For every group in ``groups`` (or every known group when ``groups`` is
    None) the group metakey is read asynchronously. Parsed metadata is used
    to create missing groupsets and namespaces, and sibling groups mentioned
    in a metakey are scheduled for processing ahead of the remaining results.

    :param groups: optional iterable of storage groups to process; falls
        back to all groups known to ``storage``
    """
    # Group ids discovered in metakeys that should be processed before the
    # remaining pending results.
    _queue = set()

    def _get_data_groups(group):
        # Replica-set couple: list of data group ids from the metakey.
        return group.meta['couple']

    def _get_lrc_groups(group):
        # LRC couple: list of group ids participating in the lrc groupset.
        return group.meta['lrc']['groups']

    def _create_groupset_if_needed(groups, group_type, ns_id):
        # Ensure every member group exists (adding fake placeholders for
        # unknown ids), create the groupset and its namespace binding on
        # first sight, and return the (possibly preexisting) groupset.
        for gid in groups:
            if gid not in storage.groups:
                logger.info(
                    'Group {group} is not found, adding fake group '
                    'for groupset {groups}'.format(
                        group=gid,
                        groups=groups,
                    ))
                storage.groups.add(gid)
        groupset_str = ':'.join(str(gid) for gid in sorted(groups))
        if groupset_str not in storage.groupsets:
            # TODO: somehow check that couple type matches group.type
            # for all groups in couple (not very easy when metakey read
            # fails)
            logger.info(
                'Creating groupset {groups}, group type "{group_type}"'.format(
                    groups=groupset_str,
                    group_type=group_type,
                ))
            c = storage.groupsets.add(
                groups=(storage.groups[gid] for gid in groups),
                group_type=group_type,
            )
            for gid in groups:
                infrastructure.update_group_history(storage.groups[gid])
            if ns_id not in storage.namespaces:
                logger.info('Creating storage namespace {}'.format(ns_id))
                ns = storage.namespaces.add(ns_id)
            else:
                ns = storage.namespaces[ns_id]
            ns.add_couple(c)
        return storage.groupsets[groupset_str]

    def _process_group_metadata(response, group, elapsed_time=None, end_time=None):
        # Callback for a single metakey read; classifies read errors and,
        # on success, parses metadata and builds the relevant groupsets.
        logger.debug(
            'Cluster updating: group {0} meta key read time: {1}.{2}'.format(
                group.group_id, elapsed_time.tsec, elapsed_time.tnsec))
        if response.error.code:
            if response.error.code == errors.ELLIPTICS_NOT_FOUND:
                # This group is some kind of uncoupled group, not an error
                group.parse_meta(None)
                logger.info(
                    'Group {group} has no metakey'.format(group=group))
            elif response.error.code in (
                # Route list did not contain the group, expected error
                errors.ELLIPTICS_GROUP_NOT_IN_ROUTE_LIST,
                # Timeout in reading metakey from the group, expected error
                errors.ELLIPTICS_TIMEOUT,
            ):
                group.reset_meta()
                logger.error(
                    'Error on updating metakey from group {group}: {error}'
                    .format(
                        group=group,
                        error=response.error.message,
                    ))
            else:
                # BUGFIX: was ``response.error.mssage`` (typo), which raised
                # AttributeError instead of the intended RuntimeError.
                raise RuntimeError(response.error.message)
            return
        meta = response.data
        group.parse_meta(meta)
        if group.type == storage.Group.TYPE_UNCOUPLED_LRC_8_2_2_V1:
            # Uncoupled lrc groups carry no couple information yet.
            return
        ns_id = group.meta.get('namespace')
        if ns_id is None:
            logger.error(
                'Inconsistent meta read from group {group}, missing namespace: {meta}'
                .format(
                    group=group,
                    meta=group.meta,
                ))
            return
        if group.type == storage.Group.TYPE_DATA:
            groups = _get_data_groups(group)
        elif group.type == storage.Group.TYPE_LRC_8_2_2_V1:
            groups = _get_lrc_groups(group)
        elif group.type == storage.Group.TYPE_CACHE:
            groups = _get_data_groups(group)
        else:
            raise RuntimeError(
                'Group {group_id}, unexpected type to process: {type}'.format(
                    group_id=group.group_id,
                    type=group.type,
                ))
        logger.info('Read symmetric groups from group {}: {}'.format(
            group.group_id, groups))
        for gid in groups:
            if gid != group.group_id:
                logger.info('Scheduling update for group {}'.format(gid))
                _queue.add(gid)
        groupset = _create_groupset_if_needed(groups, group.type, ns_id)
        if group.type == storage.Group.TYPE_LRC_8_2_2_V1:
            # TODO: this will become unnecessary when new "Couple" instance
            # is introduced
            data_groups = _get_data_groups(group)
            data_groupset = _create_groupset_if_needed(
                data_groups, storage.Group.TYPE_DATA, ns_id)
            data_groupset.lrc822v1_groupset = groupset
            # TODO: this should point to a new "Couple" object
            groupset.couple = data_groupset
        return

    try:
        check_groups = groups or storage.groups.keys()
        results = {}
        for group in check_groups:
            session = self.__session.clone()
            # errors are delivered through response.error instead of raising
            session.set_exceptions_policy(
                elliptics.exceptions_policy.no_exceptions)
            session.set_filter(elliptics.filters.all_with_ack)
            session.add_groups([group.group_id])
            logger.debug('Request to read {0} for group {1}'.format(
                keys.SYMMETRIC_GROUPS_KEY.replace('\0', '\\0'),
                group.group_id))
            results[group.group_id] = session.read_data(
                keys.SYMMETRIC_GROUPS_KEY)

        # Best effort: map group id -> active job, used below to attach
        # jobs to groups; failure to fetch jobs must not abort the update.
        jobs = {}
        if self.job_finder:
            try:
                params = {'statuses': Job.ACTIVE_STATUSES}
                if groups:
                    params['groups'] = [g.group_id for g in groups]
                for job in self.job_finder.jobs(**params):
                    # TODO: this should definitely be done some other way
                    if hasattr(job, 'group'):
                        jobs[job.group] = job
            except Exception as e:
                logger.exception(
                    'Failed to fetch pending jobs: {0}'.format(e))

        while results:
            # TODO: Think on queue, it does not work well with lrc couples
            if _queue:
                # Prefer groups scheduled by previously processed metakeys.
                group_id = _queue.pop()
                if group_id not in results:
                    continue
                result = results.pop(group_id)
            else:
                group_id, result = results.popitem()
            group = storage.groups[group_id]
            try:
                h.process_elliptics_async_result(
                    result,
                    _process_group_metadata,
                    group,
                    raise_on_error=False,
                )
            except Exception:
                logger.exception(
                    'Critical error on updating metakey from group {}'.format(
                        group_id))
                group.parse_meta(None)
            finally:
                # Status updates must run for every group, even when the
                # metakey processing above failed.
                try:
                    group.set_active_job(jobs.get(group.group_id))
                except Exception as e:
                    logger.exception(
                        'Failed to set group active job: {}'.format(e))
                try:
                    group.update_status_recursive()
                except Exception as e:
                    logger.exception(
                        'Failed to update group {0} status: {1}'.format(
                            group, e))

        if groups is None:
            # Full-cluster pass: refresh derived cluster-wide state.
            self.update_couple_settings()
            load_manager.update(storage)
            weight_manager.update(storage)
            infrastructure.schedule_history_update()
    except Exception:
        logger.exception('Critical error during symmetric group update')
def update_symm_groups_async(self, groups=None):
    """Read metakeys from storage groups and (re)build couples.

    For every group in ``groups`` (or every known group when ``groups`` is
    None) the group metakey is read asynchronously; parsed metadata is used
    to create missing couples / cache couples and namespaces. Sibling groups
    mentioned in a metakey are scheduled for processing ahead of the
    remaining pending results.
    """
    # Group ids discovered in metakeys, processed before remaining results.
    _queue = set()

    def _process_group_metadata(response, group, elapsed_time=None, end_time=None):
        # Callback for a single metakey read: parse metadata and create the
        # couple/namespace objects on first sight.
        logger.debug(
            'Cluster updating: group {0} meta key read time: {1}.{2}'.format(
                group.group_id, elapsed_time.tsec, elapsed_time.tnsec))
        meta = response.data
        group.parse_meta(meta)
        couple = group.meta.get('couple')
        if couple is None:
            # Metakey without couple data is treated as inconsistent input.
            logger.error(
                'Read symmetric groups from group {} (no couple data): {}'.format(
                    group.group_id, meta))
            return
        logger.info('Read symmetric groups from group {}: {}'.format(
            group.group_id, couple))
        for gid in couple:
            if gid != group.group_id:
                # Process sibling groups of this couple next.
                logger.info('Scheduling update for group {}'.format(gid))
                _queue.add(gid)
        couple_str = ':'.join((str(gid) for gid in sorted(couple)))
        logger.debug('{0} in storage.couples: {1}'.format(
            couple_str, couple_str in storage.couples))
        if couple_str not in storage.couples and couple_str not in storage.cache_couples:
            ns_id = group.meta.get('namespace')
            if ns_id is None:
                logger.error('Inconsistent meta read from group {}, '
                             'missing namespace: {}'.format(
                                 group, group.meta))
                return
            if group.type == storage.Group.TYPE_DATA:
                logger.info('Creating couple {0}'.format(couple_str))
                for gid in couple:
                    if gid not in storage.groups:
                        # Placeholder for a group id not seen in the route
                        # list yet.
                        logger.info(
                            'Group {} is not found adding fake group for '
                            'couple {}'.format(gid, couple))
                        storage.groups.add(gid)
                c = storage.couples.add(
                    [storage.groups[gid] for gid in couple])
                logger.info('Created couple {0} {1}'.format(c, repr(c)))
            elif group.type == storage.Group.TYPE_CACHE:
                logger.info('Creating cache couple {0}'.format(couple_str))
                c = storage.cache_couples.add(
                    [storage.groups[gid] for gid in couple])
                logger.info('Created cache couple {0} {1}'.format(
                    c, repr(c)))
            else:
                raise ValueError(
                    'Unknown group type for group {}: {}'.format(
                        group, group.type))
            for gid in couple:
                infrastructure.update_group_history(storage.groups[gid])
            if ns_id not in storage.namespaces:
                logger.info('Creating storage namespace {}'.format(ns_id))
                ns = storage.namespaces.add(ns_id)
            else:
                ns = storage.namespaces[ns_id]
            ns.add_couple(c)
        return

    try:
        check_groups = groups or storage.groups.keys()
        results = {}
        for group in check_groups:
            session = self.__session.clone()
            session.add_groups([group.group_id])
            logger.debug('Request to read {0} for group {1}'.format(
                keys.SYMMETRIC_GROUPS_KEY.replace('\0', '\\0'),
                group.group_id))
            results[group.group_id] = session.read_data(
                keys.SYMMETRIC_GROUPS_KEY)
        # Best effort: map group id -> active job; failure to fetch jobs
        # must not abort the whole update.
        jobs = {}
        if self.job_finder:
            try:
                params = {'statuses': Job.ACTIVE_STATUSES}
                if groups:
                    params['groups'] = [g.group_id for g in groups]
                for job in self.job_finder.jobs(**params):
                    jobs[job.group] = job
            except Exception as e:
                logger.exception(
                    'Failed to fetch pending jobs: {0}'.format(e))
                pass
        while results:
            if _queue:
                # Prefer groups scheduled by previously processed metakeys.
                group_id = _queue.pop()
                if group_id not in results:
                    continue
                result = results.pop(group_id)
            else:
                group_id, result = results.popitem()
            group = storage.groups[group_id]
            try:
                h.process_elliptics_async_result(result,
                                                 _process_group_metadata,
                                                 group)
            except elliptics.NotFoundError as e:
                # Missing metakey is expected for uncoupled groups.
                logger.warn(
                    'Failed to read symmetric_groups from group {0}: {1}'.format(
                        group_id, e))
                group.parse_meta(None)
            except Exception as e:
                logger.exception(
                    'Failed to read symmetric_groups from group {0}: {1}'.format(
                        group_id, e))
                group.parse_meta(None)
            finally:
                # Status updates must run for every group, even on failure.
                try:
                    group.set_active_job(jobs.get(group.group_id))
                except Exception as e:
                    logger.exception(
                        'Failed to set group active job: {}'.format(e))
                    pass
                try:
                    group.update_status_recursive()
                except Exception as e:
                    logger.exception(
                        'Failed to update group {0} status: {1}'.format(
                            group, e))
                    pass
        # Refresh derived cluster-wide state after all groups are processed.
        load_manager.update(storage)
        weight_manager.update(storage)
        infrastructure.schedule_history_update()
    except Exception as e:
        logger.exception('Critical error during symmetric group update')
def update_symm_groups_async(self, groups=None):
    """Read metakeys from storage groups and (re)build couples.

    Same flow as the couple-based updater: read each group's metakey,
    create missing couples / cache couples and namespaces, and schedule
    sibling groups for processing ahead of remaining results. Cluster-wide
    derived state is refreshed only on a full pass (``groups is None``).
    """
    # Group ids discovered in metakeys, processed before remaining results.
    _queue = set()

    def _process_group_metadata(response, group, elapsed_time=None, end_time=None):
        # Callback for a single metakey read: parse metadata and create the
        # couple/namespace objects on first sight.
        logger.debug('Cluster updating: group {0} meta key read time: {1}.{2}'.format(
            group.group_id, elapsed_time.tsec, elapsed_time.tnsec))
        meta = response.data
        group.parse_meta(meta)
        couple = group.meta.get('couple')
        if couple is None:
            # Metakey without couple data is treated as inconsistent input.
            logger.error('Read symmetric groups from group {} (no couple data): {}'.format(
                group.group_id, group.meta))
            return
        logger.info('Read symmetric groups from group {}: {}'.format(
            group.group_id, couple))
        for gid in couple:
            if gid != group.group_id:
                # Process sibling groups of this couple next.
                logger.info('Scheduling update for group {}'.format(gid))
                _queue.add(gid)
        couple_str = ':'.join((str(gid) for gid in sorted(couple)))
        logger.debug('{0} in storage.couples: {1}'.format(
            couple_str, couple_str in storage.couples))
        if couple_str not in storage.couples and couple_str not in storage.cache_couples:
            ns_id = group.meta.get('namespace')
            if ns_id is None:
                logger.error('Inconsistent meta read from group {}, '
                             'missing namespace: {}'.format(group, group.meta))
                return
            if group.type == storage.Group.TYPE_DATA:
                logger.info('Creating couple {0}'.format(couple_str))
                for gid in couple:
                    if gid not in storage.groups:
                        # Placeholder for a group id not seen in the route
                        # list yet.
                        logger.info('Group {} is not found adding fake group for '
                                    'couple {}'.format(gid, couple))
                        storage.groups.add(gid)
                c = storage.couples.add(storage.groups[gid] for gid in couple)
                logger.info('Created couple {0} {1}'.format(c, repr(c)))
            elif group.type == storage.Group.TYPE_CACHE:
                logger.info('Creating cache couple {0}'.format(couple_str))
                c = storage.cache_couples.add(storage.groups[gid] for gid in couple)
                logger.info('Created cache couple {0} {1}'.format(c, repr(c)))
            else:
                raise ValueError('Unknown group type for group {}: {}'.format(
                    group, group.type))
            for gid in couple:
                infrastructure.update_group_history(storage.groups[gid])
            if ns_id not in storage.namespaces:
                logger.info('Creating storage namespace {}'.format(ns_id))
                ns = storage.namespaces.add(ns_id)
            else:
                ns = storage.namespaces[ns_id]
            ns.add_couple(c)
        return

    try:
        check_groups = groups or storage.groups.keys()
        results = {}
        for group in check_groups:
            session = self.__session.clone()
            session.add_groups([group.group_id])
            logger.debug('Request to read {0} for group {1}'.format(
                keys.SYMMETRIC_GROUPS_KEY.replace('\0', '\\0'), group.group_id))
            results[group.group_id] = session.read_data(keys.SYMMETRIC_GROUPS_KEY)
        # Best effort: map group id -> active job; failure to fetch jobs
        # must not abort the whole update.
        jobs = {}
        if self.job_finder:
            try:
                params = {'statuses': Job.ACTIVE_STATUSES}
                if groups:
                    params['groups'] = [g.group_id for g in groups]
                for job in self.job_finder.jobs(**params):
                    jobs[job.group] = job
            except Exception as e:
                logger.exception('Failed to fetch pending jobs: {0}'.format(e))
                pass
        while results:
            if _queue:
                # Prefer groups scheduled by previously processed metakeys.
                group_id = _queue.pop()
                if group_id not in results:
                    continue
                result = results.pop(group_id)
            else:
                group_id, result = results.popitem()
            group = storage.groups[group_id]
            try:
                h.process_elliptics_async_result(result,
                                                 _process_group_metadata,
                                                 group)
            except elliptics.NotFoundError as e:
                # Missing metakey is expected for uncoupled groups.
                logger.warn('Failed to read symmetric_groups from group {0}: {1}'.format(
                    group_id, e))
                group.parse_meta(None)
            except Exception as e:
                logger.exception('Failed to read symmetric_groups from group {0}: {1}'.format(
                    group_id, e))
                group.parse_meta(None)
            finally:
                # Status updates must run for every group, even on failure.
                try:
                    group.set_active_job(jobs.get(group.group_id))
                except Exception as e:
                    logger.exception('Failed to set group active job: {}'.format(e))
                    pass
                try:
                    group.update_status_recursive()
                except Exception as e:
                    logger.exception('Failed to update group {0} status: {1}'.format(group, e))
                    pass
        if groups is None:
            # Full-cluster pass: refresh derived cluster-wide state.
            load_manager.update(storage)
            weight_manager.update(storage)
            infrastructure.schedule_history_update()
    except Exception as e:
        logger.exception('Critical error during symmetric group update')
def update_symm_groups_async(self, groups=None):
    """Read metakeys from storage groups and (re)build couples.

    Early variant: only plain data couples are supported (no cache couples,
    no namespaces, no job binding). Sibling groups found in a metakey are
    scheduled for processing ahead of the remaining pending results.
    """
    # Group ids discovered in metakeys, processed before remaining results.
    _queue = set()

    def _process_group_metadata(response, group, elapsed_time=None, end_time=None):
        # Callback for a single metakey read: parse metadata and create the
        # couple object on first sight.
        logger.debug('Cluster updating: group {0} meta key read time: {1}.{2}'.format(
            group.group_id, elapsed_time.tsec, elapsed_time.tnsec))
        meta = response.data
        group.parse_meta(meta)
        # NOTE: hard key access — a metakey without 'couple' raises KeyError
        # here, which is caught by the generic handler in the loop below and
        # results in parse_meta(None).
        couple = group.meta['couple']
        logger.info('Read symmetric groups from group '
                    '{0}: {1}'.format(group.group_id, couple))
        for gid in couple:
            if gid != group.group_id:
                # Process sibling groups of this couple next.
                logger.info('Scheduling update '
                            'for group {0}'.format(gid))
                _queue.add(gid)
        couple_str = ':'.join((str(gid) for gid in sorted(couple)))
        logger.debug('{0} in storage.couples: {1}'.format(
            couple_str, couple_str in storage.couples))
        if not couple_str in storage.couples:
            logger.info('Creating couple {0}'.format(couple_str))
            for gid in couple:
                if not gid in storage.groups:
                    # Placeholder for a group id not seen in the route list.
                    logger.info("Group {0} doesn't exist in "
                                "all_groups, add fake data with couple={1}".format(gid, couple))
                    storage.groups.add(gid)
            c = storage.couples.add([storage.groups[gid] for gid in couple])
            logger.info('Created couple {0} {1}'.format(c, repr(c)))
        return

    try:
        # NOTE: rebinds the ``groups`` parameter to the full group list when
        # it is falsy.
        groups = groups or storage.groups.keys()
        results = {}
        for group in groups:
            session = self.__session.clone()
            session.add_groups([group.group_id])
            logger.debug('Request to read {0} for group {1}'.format(
                keys.SYMMETRIC_GROUPS_KEY.replace('\0', '\\0'), group.group_id))
            results[group.group_id] = session.read_data(keys.SYMMETRIC_GROUPS_KEY)
        while results:
            if _queue:
                # Prefer groups scheduled by previously processed metakeys.
                group_id = _queue.pop()
                if group_id not in results:
                    continue
                result = results.pop(group_id)
            else:
                group_id, result = results.popitem()
            group = storage.groups[group_id]
            try:
                h.process_elliptics_async_result(result,
                                                 _process_group_metadata,
                                                 group)
            except elliptics.NotFoundError as e:
                # Missing metakey is expected for uncoupled groups.
                logger.warn('Failed to read symmetric_groups '
                            'from group {0}: {1}'.format(group_id, e))
                group.parse_meta(None)
            except Exception as e:
                logger.error('Failed to read symmetric_groups '
                             'from group {0}: {1}\n{2}'.format(
                                 group_id, e, traceback.format_exc()))
                group.parse_meta(None)
            finally:
                # Status update must run for every group, even on failure.
                try:
                    group.update_status_recursive()
                except Exception as e:
                    logger.error('Failed to update group {0} status: '
                                 '{1}\n{2}'.format(group, e, traceback.format_exc()))
                    pass
    except Exception as e:
        logger.error('Critical error during symmetric group '
                     'update, {0}: {1}'.format(str(e), traceback.format_exc()))
def update_symm_groups_async(self, groups=None):
    """Read metakeys from storage groups and (re)build groupsets.

    For every group in ``groups`` (or every known group when ``groups`` is
    None) the group metakey is read asynchronously. Parsed metadata is used
    to create missing groupsets and namespaces, and sibling groups mentioned
    in a metakey are scheduled for processing ahead of the remaining results.

    :param groups: optional iterable of storage groups to process; falls
        back to all groups known to ``storage``
    """
    # Group ids discovered in metakeys that should be processed before the
    # remaining pending results.
    _queue = set()

    def _get_data_groups(group):
        # Replica-set couple: list of data group ids from the metakey.
        return group.meta['couple']

    def _get_lrc_groups(group):
        # LRC couple: list of group ids participating in the lrc groupset.
        return group.meta['lrc']['groups']

    def _create_groupset_if_needed(groups, group_type, ns_id):
        # Ensure every member group exists (adding fake placeholders for
        # unknown ids), create the groupset and its namespace binding on
        # first sight, and return the (possibly preexisting) groupset.
        for gid in groups:
            if gid not in storage.groups:
                logger.info(
                    'Group {group} is not found, adding fake group '
                    'for groupset {groups}'.format(
                        group=gid,
                        groups=groups,
                    )
                )
                storage.groups.add(gid)
        groupset_str = ':'.join(str(gid) for gid in sorted(groups))
        if groupset_str not in storage.groupsets:
            # TODO: somehow check that couple type matches group.type
            # for all groups in couple (not very easy when metakey read
            # fails)
            logger.info('Creating groupset {groups}, group type "{group_type}"'.format(
                groups=groupset_str,
                group_type=group_type,
            ))
            c = storage.groupsets.add(
                groups=(storage.groups[gid] for gid in groups),
                group_type=group_type,
            )
            for gid in groups:
                infrastructure.update_group_history(storage.groups[gid])
            if ns_id not in storage.namespaces:
                logger.info('Creating storage namespace {}'.format(ns_id))
                ns = storage.namespaces.add(ns_id)
            else:
                ns = storage.namespaces[ns_id]
            ns.add_couple(c)
        return storage.groupsets[groupset_str]

    def _process_group_metadata(response, group, elapsed_time=None, end_time=None):
        # Callback for a single metakey read; classifies read errors and,
        # on success, parses metadata and builds the relevant groupsets.
        logger.debug('Cluster updating: group {0} meta key read time: {1}.{2}'.format(
            group.group_id, elapsed_time.tsec, elapsed_time.tnsec))
        if response.error.code:
            if response.error.code == errors.ELLIPTICS_NOT_FOUND:
                # This group is some kind of uncoupled group, not an error
                group.parse_meta(None)
                logger.info(
                    'Group {group} has no metakey'.format(group=group)
                )
            elif response.error.code in (
                # Route list did not contain the group, expected error
                errors.ELLIPTICS_GROUP_NOT_IN_ROUTE_LIST,
                # Timeout in reading metakey from the group, expected error
                errors.ELLIPTICS_TIMEOUT,
            ):
                group.reset_meta()
                logger.error(
                    'Error on updating metakey from group {group}: {error}'.format(
                        group=group,
                        error=response.error.message,
                    )
                )
            else:
                # BUGFIX: was ``response.error.mssage`` (typo), which raised
                # AttributeError instead of the intended RuntimeError.
                raise RuntimeError(response.error.message)
            return
        meta = response.data
        group.parse_meta(meta)
        if group.type == storage.Group.TYPE_UNCOUPLED_LRC_8_2_2_V1:
            # Uncoupled lrc groups carry no couple information yet.
            return
        ns_id = group.meta.get('namespace')
        if ns_id is None:
            logger.error(
                'Inconsistent meta read from group {group}, missing namespace: {meta}'.format(
                    group=group,
                    meta=group.meta,
                )
            )
            return
        if group.type == storage.Group.TYPE_DATA:
            groups = _get_data_groups(group)
        elif group.type == storage.Group.TYPE_LRC_8_2_2_V1:
            groups = _get_lrc_groups(group)
        elif group.type == storage.Group.TYPE_CACHE:
            groups = _get_data_groups(group)
        else:
            raise RuntimeError(
                'Group {group_id}, unexpected type to process: {type}'.format(
                    group_id=group.group_id,
                    type=group.type,
                )
            )
        logger.info('Read symmetric groups from group {}: {}'.format(
            group.group_id, groups))
        for gid in groups:
            if gid != group.group_id:
                logger.info('Scheduling update for group {}'.format(gid))
                _queue.add(gid)
        groupset = _create_groupset_if_needed(groups, group.type, ns_id)
        if group.type == storage.Group.TYPE_LRC_8_2_2_V1:
            # TODO: this will become unnecessary when new "Couple" instance
            # is introduced
            data_groups = _get_data_groups(group)
            data_groupset = _create_groupset_if_needed(
                data_groups, storage.Group.TYPE_DATA, ns_id
            )
            data_groupset.lrc822v1_groupset = groupset
            # TODO: this should point to a new "Couple" object
            groupset.couple = data_groupset
        return

    try:
        check_groups = groups or storage.groups.keys()
        results = {}
        for group in check_groups:
            session = self.__session.clone()
            # errors are delivered through response.error instead of raising
            session.set_exceptions_policy(elliptics.exceptions_policy.no_exceptions)
            session.set_filter(elliptics.filters.all_with_ack)
            session.add_groups([group.group_id])
            logger.debug('Request to read {0} for group {1}'.format(
                keys.SYMMETRIC_GROUPS_KEY.replace('\0', '\\0'),
                group.group_id))
            results[group.group_id] = session.read_data(keys.SYMMETRIC_GROUPS_KEY)

        # Best effort: map group id -> active job, used below to attach
        # jobs to groups; failure to fetch jobs must not abort the update.
        jobs = {}
        if self.job_finder:
            try:
                params = {'statuses': Job.ACTIVE_STATUSES}
                if groups:
                    params['groups'] = [g.group_id for g in groups]
                for job in self.job_finder.jobs(**params):
                    # TODO: this should definitely be done some other way
                    if hasattr(job, 'group'):
                        jobs[job.group] = job
            except Exception as e:
                logger.exception('Failed to fetch pending jobs: {0}'.format(e))

        while results:
            # TODO: Think on queue, it does not work well with lrc couples
            if _queue:
                # Prefer groups scheduled by previously processed metakeys.
                group_id = _queue.pop()
                if group_id not in results:
                    continue
                result = results.pop(group_id)
            else:
                group_id, result = results.popitem()
            group = storage.groups[group_id]
            try:
                h.process_elliptics_async_result(
                    result,
                    _process_group_metadata,
                    group,
                    raise_on_error=False,
                )
            except Exception:
                logger.exception(
                    'Critical error on updating metakey from group {}'.format(group_id)
                )
                group.parse_meta(None)
            finally:
                # Status updates must run for every group, even when the
                # metakey processing above failed.
                try:
                    group.set_active_job(jobs.get(group.group_id))
                except Exception as e:
                    logger.exception('Failed to set group active job: {}'.format(e))
                try:
                    group.update_status_recursive()
                except Exception as e:
                    logger.exception('Failed to update group {0} status: {1}'.format(group, e))

        if groups is None:
            # Full-cluster pass: refresh derived cluster-wide state.
            self.update_couple_settings()
            load_manager.update(storage)
            weight_manager.update(storage)
            infrastructure.schedule_history_update()
    except Exception:
        logger.exception('Critical error during symmetric group update')