Example #1
    def monitor_stats(self, groups=None):
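        # Collect monitor statistics from elliptics nodes. When "groups" is given,
        # only hosts backing those groups are polled; otherwise all addresses from
        # the session route table are used.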
        if groups:
            hosts = set((nb.node.host.addr, nb.node.port, nb.node.family)
                        for g in groups for nb in g.node_backends)
            host_addrs = [elliptics.Address(*host) for host in hosts]
        else:
            logger.info('Before calculating routes')
            host_addrs = self.__session.routes.addresses()
            logger.info('Unique routes calculated')

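        # Register any hosts and nodes that the storage model does not know about yet.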
        for ha in host_addrs:
            node_addr = '{host}:{port}'.format(
                host=ha.host,
                port=ha.port,
            )
            if ha.host not in storage.hosts:
                logger.debug('Adding host {}'.format(ha.host))
                host = storage.hosts.add(ha.host)
            else:
                host = storage.hosts[ha.host]
            if node_addr not in storage.nodes:
                logger.debug('Adding node {}'.format(node_addr))
                storage.nodes.add(host, ha.port, ha.family)

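        # Pull monitor stats from every address and feed each response into
        # update_statistics.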
        responses_collected = 0
        for node, result in self._do_get_monitor_stats(host_addrs):
            responses_collected += 1
            self.update_statistics(
                node,
                result['content'],
                elapsed_time=result['request_time']
            )

        logger.info(
            'Number of hosts in route table: {}, responses collected {}'.format(
                len(host_addrs),
                responses_collected,
            )
        )

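        # Refresh statuses for the affected node backends, filesystems and groups
        # (all of them when no explicit group list was passed).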
        # TODO: can we use iterkeys?
        nbs = (groups and
               [nb for g in groups for nb in g.node_backends] or
               storage.node_backends.keys())
        for nb in nbs:
            nb.update_statistics_status()
            nb.update_status()

        # TODO: can we use iterkeys?
        fss = (groups and set(nb.fs for nb in nbs) or storage.fs.keys())
        for fs in fss:
            fs.update_status()

        for group in groups or storage.groups.keys():
            logger.info('Updating status for group {0}'.format(group.group_id))
            group.update_status()

        if groups is None:
            storage.dc_host_view.update()
            load_manager.update(storage)
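A minimal sketch of how a caller might drive this method, assuming a hypothetical updater object exposing monitor_stats and a storage.groups mapping from group ids to Group objects (the names refresh_stats, updater and the group ids are illustrative, not from the source):

    # Hypothetical driver; only the monitor_stats(groups=None) signature comes from
    # the example above.
    def refresh_stats(updater, group_ids=None):
        groups = None
        if group_ids:
            # Limit the poll to groups that storage already knows about.
            groups = [storage.groups[gid] for gid in group_ids if gid in storage.groups]
        updater.monitor_stats(groups=groups)

    refresh_stats(updater)            # full pass over the route table
    refresh_stats(updater, [42, 43])  # poll only hosts backing groups 42 and 43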
Example #2
    def monitor_stats(self, groups=None):
        if groups:
            hosts = set((nb.node.host.addr, nb.node.port, nb.node.family)
                        for g in groups for nb in g.node_backends)
            host_addrs = [elliptics.Address(*host) for host in hosts]
        else:
            logger.info('Before calculating routes')
            host_addrs = self.__session.routes.addresses()
            logger.info('Unique routes calculated')

        for ha in host_addrs:
            node_addr = '{host}:{port}'.format(
                host=ha.host,
                port=ha.port,
            )
            if ha.host not in storage.hosts:
                logger.debug('Adding host {}'.format(ha.host))
                host = storage.hosts.add(ha.host)
            else:
                host = storage.hosts[ha.host]
            if node_addr not in storage.nodes:
                logger.debug('Adding node {}'.format(node_addr))
                storage.nodes.add(host, ha.port, ha.family)

        responses_collected = 0
        for node, result in self._do_get_monitor_stats(host_addrs):
            responses_collected += 1
            self.update_statistics(node,
                                   result['content'],
                                   elapsed_time=result['request_time'])

        logger.info(
            'Number of hosts in route table: {}, responses collected {}'.
            format(
                len(host_addrs),
                responses_collected,
            ))

        # TODO: can we use iterkeys?
        nbs = (groups and [nb for g in groups for nb in g.node_backends]
               or storage.node_backends.keys())
        for nb in nbs:
            nb.update_statistics_status()
            nb.update_status()

        # TODO: can we use iterkeys?
        fss = (groups and set(nb.fs for nb in nbs) or storage.fs.keys())
        for fs in fss:
            fs.update_status()

        for group in groups or storage.groups.keys():
            logger.info('Updating status for group {0}'.format(group.group_id))
            group.update_status()

        if groups is None:
            storage.dc_host_view.update()
            load_manager.update(storage)
Example #3
    def update_symm_groups_async(self, groups=None):
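        # Read the metakey of every group to be checked and rebuild groupsets,
        # namespaces and group statuses from the parsed metadata.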

        _queue = set()

        def _get_data_groups(group):
            return group.meta['couple']

        def _get_lrc_groups(group):
            return group.meta['lrc']['groups']

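        # Ensure a groupset exists for the given group ids: create fake groups for
        # unknown ids, register the groupset and bind it to its namespace.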
        def _create_groupset_if_needed(groups, group_type, ns_id):

            for gid in groups:
                if gid not in storage.groups:
                    logger.info(
                        'Group {group} is not found, adding fake group '
                        'for groupset {groups}'.format(
                            group=gid,
                            groups=groups,
                        ))
                    storage.groups.add(gid)

            groupset_str = ':'.join((str(gid) for gid in sorted(groups)))
            if groupset_str not in storage.groupsets:
                # TODO: somehow check that couple type matches group.type
                # for all groups in couple (not very easy when metakey read
                # fails)
                logger.info(
                    'Creating groupset {groups}, group type "{group_type}"'.
                    format(
                        groups=groupset_str,
                        group_type=group_type,
                    ))
                c = storage.groupsets.add(
                    groups=(storage.groups[gid] for gid in groups),
                    group_type=group_type,
                )

                for gid in groups:
                    infrastructure.update_group_history(storage.groups[gid])

                if ns_id not in storage.namespaces:
                    logger.info('Creating storage namespace {}'.format(ns_id))
                    ns = storage.namespaces.add(ns_id)
                else:
                    ns = storage.namespaces[ns_id]

                ns.add_couple(c)
            return storage.groupsets[groupset_str]

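        # Callback for each metakey read: parse the metadata, schedule updates for
        # sibling groups and wire up the corresponding groupsets.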
        def _process_group_metadata(response,
                                    group,
                                    elapsed_time=None,
                                    end_time=None):
            logger.debug(
                'Cluster updating: group {0} meta key read time: {1}.{2}'.
                format(group.group_id, elapsed_time.tsec, elapsed_time.tnsec))

            if response.error.code:
                if response.error.code == errors.ELLIPTICS_NOT_FOUND:
                    # This group is some kind of uncoupled group, not an error
                    group.parse_meta(None)
                    logger.info(
                        'Group {group} has no metakey'.format(group=group))
                elif response.error.code in (
                        # Route list did not contain the group, expected error
                        errors.ELLIPTICS_GROUP_NOT_IN_ROUTE_LIST,
                        # Timeout in reading metakey from the group, expected error
                        errors.ELLIPTICS_TIMEOUT,
                ):
                    group.reset_meta()
                    logger.error(
                        'Error on updating metakey from group {group}: {error}'
                        .format(
                            group=group,
                            error=response.error.message,
                        ))
                else:
                    raise RuntimeError(response.error.message)

                return

            meta = response.data

            group.parse_meta(meta)

            if group.type == storage.Group.TYPE_UNCOUPLED_LRC_8_2_2_V1:
                return

            ns_id = group.meta.get('namespace')
            if ns_id is None:
                logger.error(
                    'Inconsistent meta read from group {group}, missing namespace: {meta}'
                    .format(
                        group=group,
                        meta=group.meta,
                    ))
                return

            if group.type == storage.Group.TYPE_DATA:
                groups = _get_data_groups(group)
            elif group.type == storage.Group.TYPE_LRC_8_2_2_V1:
                groups = _get_lrc_groups(group)
            elif group.type == storage.Group.TYPE_CACHE:
                groups = _get_data_groups(group)
            else:
                raise RuntimeError(
                    'Group {group_id}, unexpected type to process: {type}'.
                    format(
                        group_id=group.group_id,
                        type=group.type,
                    ))

            logger.info('Read symmetric groups from group {}: {}'.format(
                group.group_id, groups))

            for gid in groups:
                if gid != group.group_id:
                    logger.info('Scheduling update for group {}'.format(gid))
                    _queue.add(gid)

            groupset = _create_groupset_if_needed(groups, group.type, ns_id)

            if group.type == storage.Group.TYPE_LRC_8_2_2_V1:
                # TODO: this will become unnecessary when new "Couple" instance
                # is introduced
                data_groups = _get_data_groups(group)
                data_groupset = _create_groupset_if_needed(
                    data_groups, storage.Group.TYPE_DATA, ns_id)
                data_groupset.lrc822v1_groupset = groupset
                # TODO: this should point to a new "Couple" object
                groupset.couple = data_groupset
            return

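        # Dispatch an asynchronous metakey read per group; errors are reported via
        # the result object because the session uses the no_exceptions policy.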
        try:
            check_groups = groups or storage.groups.keys()

            results = {}
            for group in check_groups:
                session = self.__session.clone()
                session.set_exceptions_policy(
                    elliptics.exceptions_policy.no_exceptions)
                session.set_filter(elliptics.filters.all_with_ack)
                session.add_groups([group.group_id])

                logger.debug('Request to read {0} for group {1}'.format(
                    keys.SYMMETRIC_GROUPS_KEY.replace('\0', '\\0'),
                    group.group_id))
                results[group.group_id] = session.read_data(
                    keys.SYMMETRIC_GROUPS_KEY)

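            # Map group ids to their active jobs so they can be attached below.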
            jobs = {}
            if self.job_finder:
                try:
                    params = {'statuses': Job.ACTIVE_STATUSES}
                    if groups:
                        params['groups'] = [g.group_id for g in groups]
                    for job in self.job_finder.jobs(**params):
                        # TODO: this should definitely be done some other way
                        if hasattr(job, 'group'):
                            jobs[job.group] = job
                except Exception as e:
                    logger.exception(
                        'Failed to fetch pending jobs: {0}'.format(e))
                    pass

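            # Drain the pending reads, letting groups queued by already parsed
            # metadata jump ahead of the remaining results.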
            while results:
                # TODO: Think on queue, it does not work well with lrc couples
                if _queue:
                    group_id = _queue.pop()
                    if group_id not in results:
                        continue
                    result = results.pop(group_id)
                else:
                    group_id, result = results.popitem()

                group = storage.groups[group_id]

                try:
                    h.process_elliptics_async_result(
                        result,
                        _process_group_metadata,
                        group,
                        raise_on_error=False,
                    )
                except Exception as e:
                    logger.exception(
                        'Critical error on updating metakey from group {}'.
                        format(group_id))
                    group.parse_meta(None)
                finally:
                    try:
                        group.set_active_job(jobs.get(group.group_id))
                    except Exception as e:
                        logger.exception(
                            'Failed to set group active job: {}'.format(e))
                        pass
                    try:
                        group.update_status_recursive()
                    except Exception as e:
                        logger.exception(
                            'Failed to update group {0} status: {1}'.format(
                                group, e))
                        pass

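            # A full cluster pass additionally refreshes couple settings, the load
            # and weight managers, and schedules a history update.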
            if groups is None:
                self.update_couple_settings()
                load_manager.update(storage)
                weight_manager.update(storage)

                infrastructure.schedule_history_update()

        except Exception as e:
            logger.exception('Critical error during symmetric group update')
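A minimal sketch of calling this method for the whole cluster or for a subset of groups; the updater name and the group ids are assumptions, only the update_symm_groups_async(groups=None) signature comes from the example:

    # Hypothetical call sites; group ids are illustrative.
    updater.update_symm_groups_async()  # full pass, also refreshes load/weight managers
    subset = [storage.groups[gid] for gid in (101, 102) if gid in storage.groups]
    updater.update_symm_groups_async(groups=subset)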
Example #4
    def update_symm_groups_async(self, groups=None):
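        # Variant of the same routine that rebuilds plain couples and cache couples
        # directly from the 'couple' field of each group's metakey.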

        _queue = set()

        def _process_group_metadata(response,
                                    group,
                                    elapsed_time=None,
                                    end_time=None):
            logger.debug(
                'Cluster updating: group {0} meta key read time: {1}.{2}'.
                format(group.group_id, elapsed_time.tsec, elapsed_time.tnsec))
            meta = response.data

            group.parse_meta(meta)
            couple = group.meta.get('couple')
            if couple is None:
                logger.error(
                    'Read symmetric groups from group {} (no couple data): {}'.
                    format(group.group_id, meta))
                return

            logger.info('Read symmetric groups from group {}: {}'.format(
                group.group_id, couple))
            for gid in couple:
                if gid != group.group_id:
                    logger.info('Scheduling update for group {}'.format(gid))
                    _queue.add(gid)

            couple_str = ':'.join((str(gid) for gid in sorted(couple)))

            logger.debug('{0} in storage.couples: {1}'.format(
                couple_str, couple_str in storage.couples))

            if couple_str not in storage.couples and couple_str not in storage.cache_couples:

                ns_id = group.meta.get('namespace')
                if ns_id is None:
                    logger.error('Inconsistent meta read from group {}, '
                                 'missing namespace: {}'.format(
                                     group, group.meta))
                    return

                if group.type == storage.Group.TYPE_DATA:
                    logger.info('Creating couple {0}'.format(couple_str))
                    for gid in couple:
                        if gid not in storage.groups:
                            logger.info(
                                'Group {} is not found, adding fake group for '
                                'couple {}'.format(gid, couple))
                            storage.groups.add(gid)
                    c = storage.couples.add(
                        [storage.groups[gid] for gid in couple])
                    logger.info('Created couple {0} {1}'.format(c, repr(c)))
                elif group.type == storage.Group.TYPE_CACHE:
                    logger.info('Creating cache couple {0}'.format(couple_str))
                    c = storage.cache_couples.add(
                        [storage.groups[gid] for gid in couple])
                    logger.info('Created cache couple {0} {1}'.format(
                        c, repr(c)))
                else:
                    raise ValueError(
                        'Unknown group type for group {}: {}'.format(
                            group, group.type))

                for gid in couple:
                    infrastructure.update_group_history(storage.groups[gid])

                if ns_id not in storage.namespaces:
                    logger.info('Creating storage namespace {}'.format(ns_id))
                    ns = storage.namespaces.add(ns_id)
                else:
                    ns = storage.namespaces[ns_id]

                ns.add_couple(c)
            return

        try:
            check_groups = groups or storage.groups.keys()

            results = {}
            for group in check_groups:
                session = self.__session.clone()
                session.add_groups([group.group_id])

                logger.debug('Request to read {0} for group {1}'.format(
                    keys.SYMMETRIC_GROUPS_KEY.replace('\0', '\\0'),
                    group.group_id))
                results[group.group_id] = session.read_data(
                    keys.SYMMETRIC_GROUPS_KEY)

            jobs = {}
            if self.job_finder:
                try:
                    params = {'statuses': Job.ACTIVE_STATUSES}
                    if groups:
                        params['groups'] = [g.group_id for g in groups]
                    for job in self.job_finder.jobs(**params):
                        jobs[job.group] = job
                except Exception as e:
                    logger.exception(
                        'Failed to fetch pending jobs: {0}'.format(e))
                    pass

            while results:
                if _queue:
                    group_id = _queue.pop()
                    if group_id not in results:
                        continue
                    result = results.pop(group_id)
                else:
                    group_id, result = results.popitem()

                group = storage.groups[group_id]

                try:
                    h.process_elliptics_async_result(result,
                                                     _process_group_metadata,
                                                     group)
                except elliptics.NotFoundError as e:
                    logger.warning(
                        'Failed to read symmetric_groups from group {0}: {1}'.
                        format(group_id, e))
                    group.parse_meta(None)
                except Exception as e:
                    logger.exception(
                        'Failed to read symmetric_groups from group {0}: {1}'.
                        format(group_id, e))
                    group.parse_meta(None)
                finally:
                    try:
                        group.set_active_job(jobs.get(group.group_id))
                    except Exception as e:
                        logger.exception(
                            'Failed to set group active job: {}'.format(e))
                        pass
                    try:
                        group.update_status_recursive()
                    except Exception as e:
                        logger.exception(
                            'Failed to update group {0} status: {1}'.format(
                                group, e))
                        pass

            load_manager.update(storage)
            weight_manager.update(storage)

            infrastructure.schedule_history_update()

        except Exception as e:
            logger.exception('Critical error during symmetric group update')
Example #5
    def update_symm_groups_async(self, groups=None):

        _queue = set()

        def _process_group_metadata(response, group, elapsed_time=None, end_time=None):
            logger.debug('Cluster updating: group {0} meta key read time: {1}.{2}'.format(
                group.group_id, elapsed_time.tsec, elapsed_time.tnsec))
            meta = response.data

            group.parse_meta(meta)
            couple = group.meta.get('couple')
            if couple is None:
                logger.error('Read symmetric groups from group {} (no couple data): {}'.format(
                    group.group_id, group.meta))
                return

            logger.info('Read symmetric groups from group {}: {}'.format(group.group_id, couple))
            for gid in couple:
                if gid != group.group_id:
                    logger.info('Scheduling update for group {}'.format(gid))
                    _queue.add(gid)

            couple_str = ':'.join((str(gid) for gid in sorted(couple)))

            logger.debug('{0} in storage.couples: {1}'.format(
                couple_str, couple_str in storage.couples))

            if couple_str not in storage.couples and couple_str not in storage.cache_couples:

                ns_id = group.meta.get('namespace')
                if ns_id is None:
                    logger.error('Inconsistent meta read from group {}, '
                                 'missing namespace: {}'.format(group, group.meta))
                    return

                if group.type == storage.Group.TYPE_DATA:
                    logger.info('Creating couple {0}'.format(couple_str))
                    for gid in couple:
                        if gid not in storage.groups:
                            logger.info('Group {} is not found, adding fake group for '
                                        'couple {}'.format(gid, couple))
                            storage.groups.add(gid)
                    c = storage.couples.add(storage.groups[gid] for gid in couple)
                    logger.info('Created couple {0} {1}'.format(c, repr(c)))
                elif group.type == storage.Group.TYPE_CACHE:
                    logger.info('Creating cache couple {0}'.format(couple_str))
                    c = storage.cache_couples.add(storage.groups[gid] for gid in couple)
                    logger.info('Created cache couple {0} {1}'.format(c, repr(c)))
                else:
                    raise ValueError('Unknown group type for group {}: {}'.format(
                        group, group.type))

                for gid in couple:
                    infrastructure.update_group_history(storage.groups[gid])

                if ns_id not in storage.namespaces:
                    logger.info('Creating storage namespace {}'.format(ns_id))
                    ns = storage.namespaces.add(ns_id)
                else:
                    ns = storage.namespaces[ns_id]

                ns.add_couple(c)
            return

        try:
            check_groups = groups or storage.groups.keys()

            results = {}
            for group in check_groups:
                session = self.__session.clone()
                session.add_groups([group.group_id])

                logger.debug('Request to read {0} for group {1}'.format(
                    keys.SYMMETRIC_GROUPS_KEY.replace('\0', '\\0'), group.group_id))
                results[group.group_id] = session.read_data(keys.SYMMETRIC_GROUPS_KEY)

            jobs = {}
            if self.job_finder:
                try:
                    params = {'statuses': Job.ACTIVE_STATUSES}
                    if groups:
                        params['groups'] = [g.group_id for g in groups]
                    for job in self.job_finder.jobs(**params):
                        jobs[job.group] = job
                except Exception as e:
                    logger.exception('Failed to fetch pending jobs: {0}'.format(e))
                    pass

            while results:
                if _queue:
                    group_id = _queue.pop()
                    if group_id not in results:
                        continue
                    result = results.pop(group_id)
                else:
                    group_id, result = results.popitem()

                group = storage.groups[group_id]

                try:
                    h.process_elliptics_async_result(result, _process_group_metadata, group)
                except elliptics.NotFoundError as e:
                    logger.warning('Failed to read symmetric_groups from group {0}: {1}'.format(
                        group_id, e))
                    group.parse_meta(None)
                except Exception as e:
                    logger.exception('Failed to read symmetric_groups from group {0}: {1}'.format(
                        group_id, e))
                    group.parse_meta(None)
                finally:
                    try:
                        group.set_active_job(jobs.get(group.group_id))
                    except Exception as e:
                        logger.exception('Failed to set group active job: {}'.format(e))
                        pass
                    try:
                        group.update_status_recursive()
                    except Exception as e:
                        logger.exception('Failed to update group {0} status: {1}'.format(group, e))
                        pass

            if groups is None:
                load_manager.update(storage)
                weight_manager.update(storage)

                infrastructure.schedule_history_update()

        except Exception as e:
            logger.exception('Critical error during symmetric group update')
Example #6
    def update_symm_groups_async(self, groups=None):

        _queue = set()

        def _get_data_groups(group):
            return group.meta['couple']

        def _get_lrc_groups(group):
            return group.meta['lrc']['groups']

        def _create_groupset_if_needed(groups, group_type, ns_id):

            for gid in groups:
                if gid not in storage.groups:
                    logger.info(
                        'Group {group} is not found, adding fake group '
                        'for groupset {groups}'.format(
                            group=gid,
                            groups=groups,
                        )
                    )
                    storage.groups.add(gid)

            groupset_str = ':'.join((str(gid) for gid in sorted(groups)))
            if groupset_str not in storage.groupsets:
                # TODO: somehow check that couple type matches group.type
                # for all groups in couple (not very easy when metakey read
                # fails)
                logger.info('Creating groupset {groups}, group type "{group_type}"'.format(
                    groups=groupset_str,
                    group_type=group_type,
                ))
                c = storage.groupsets.add(
                    groups=(storage.groups[gid] for gid in groups),
                    group_type=group_type,
                )

                for gid in groups:
                    infrastructure.update_group_history(storage.groups[gid])

                if ns_id not in storage.namespaces:
                    logger.info('Creating storage namespace {}'.format(ns_id))
                    ns = storage.namespaces.add(ns_id)
                else:
                    ns = storage.namespaces[ns_id]

                ns.add_couple(c)
            return storage.groupsets[groupset_str]

        def _process_group_metadata(response, group, elapsed_time=None, end_time=None):
            logger.debug('Cluster updating: group {0} meta key read time: {1}.{2}'.format(
                group.group_id, elapsed_time.tsec, elapsed_time.tnsec))

            if response.error.code:
                if response.error.code == errors.ELLIPTICS_NOT_FOUND:
                    # This group is some kind of uncoupled group, not an error
                    group.parse_meta(None)
                    logger.info(
                        'Group {group} has no metakey'.format(group=group)
                    )
                elif response.error.code in (
                    # Route list did not contain the group, expected error
                    errors.ELLIPTICS_GROUP_NOT_IN_ROUTE_LIST,
                    # Timeout in reading metakey from the group, expected error
                    errors.ELLIPTICS_TIMEOUT,
                ):
                    group.reset_meta()
                    logger.error(
                        'Error on updating metakey from group {group}: {error}'.format(
                            group=group,
                            error=response.error.message,
                        )
                    )
                else:
                    raise RuntimeError(response.error.message)

                return

            meta = response.data

            group.parse_meta(meta)

            if group.type == storage.Group.TYPE_UNCOUPLED_LRC_8_2_2_V1:
                return

            ns_id = group.meta.get('namespace')
            if ns_id is None:
                logger.error(
                    'Inconsistent meta read from group {group}, missing namespace: {meta}'.format(
                        group=group,
                        meta=group.meta,
                    )
                )
                return

            if group.type == storage.Group.TYPE_DATA:
                groups = _get_data_groups(group)
            elif group.type == storage.Group.TYPE_LRC_8_2_2_V1:
                groups = _get_lrc_groups(group)
            elif group.type == storage.Group.TYPE_CACHE:
                groups = _get_data_groups(group)
            else:
                raise RuntimeError(
                    'Group {group_id}, unexpected type to process: {type}'.format(
                        group_id=group.group_id,
                        type=group.type,
                    )
                )

            logger.info('Read symmetric groups from group {}: {}'.format(group.group_id, groups))

            for gid in groups:
                if gid != group.group_id:
                    logger.info('Scheduling update for group {}'.format(gid))
                    _queue.add(gid)

            groupset = _create_groupset_if_needed(groups, group.type, ns_id)

            if group.type == storage.Group.TYPE_LRC_8_2_2_V1:
                # TODO: this will become unnecessary when new "Couple" instance
                # is introduced
                data_groups = _get_data_groups(group)
                data_groupset = _create_groupset_if_needed(
                    data_groups,
                    storage.Group.TYPE_DATA,
                    ns_id
                )
                data_groupset.lrc822v1_groupset = groupset
                # TODO: this should point to a new "Couple" object
                groupset.couple = data_groupset
            return

        try:
            check_groups = groups or storage.groups.keys()

            results = {}
            for group in check_groups:
                session = self.__session.clone()
                session.set_exceptions_policy(elliptics.exceptions_policy.no_exceptions)
                session.set_filter(elliptics.filters.all_with_ack)
                session.add_groups([group.group_id])

                logger.debug('Request to read {0} for group {1}'.format(
                    keys.SYMMETRIC_GROUPS_KEY.replace('\0', '\\0'), group.group_id))
                results[group.group_id] = session.read_data(keys.SYMMETRIC_GROUPS_KEY)

            jobs = {}
            if self.job_finder:
                try:
                    params = {'statuses': Job.ACTIVE_STATUSES}
                    if groups:
                        params['groups'] = [g.group_id for g in groups]
                    for job in self.job_finder.jobs(**params):
                        # TODO: this should definitely be done some other way
                        if hasattr(job, 'group'):
                            jobs[job.group] = job
                except Exception as e:
                    logger.exception('Failed to fetch pending jobs: {0}'.format(e))
                    pass

            while results:
                # TODO: Think on queue, it does not work well with lrc couples
                if _queue:
                    group_id = _queue.pop()
                    if group_id not in results:
                        continue
                    result = results.pop(group_id)
                else:
                    group_id, result = results.popitem()

                group = storage.groups[group_id]

                try:
                    h.process_elliptics_async_result(
                        result,
                        _process_group_metadata,
                        group,
                        raise_on_error=False,
                    )
                except Exception as e:
                    logger.exception(
                        'Critical error on updating metakey from group {}'.format(group_id)
                    )
                    group.parse_meta(None)
                finally:
                    try:
                        group.set_active_job(jobs.get(group.group_id))
                    except Exception as e:
                        logger.exception('Failed to set group active job: {}'.format(e))
                        pass
                    try:
                        group.update_status_recursive()
                    except Exception as e:
                        logger.exception('Failed to update group {0} status: {1}'.format(group, e))
                        pass

            if groups is None:
                self.update_couple_settings()
                load_manager.update(storage)
                weight_manager.update(storage)

                infrastructure.schedule_history_update()

        except Exception as e:
            logger.exception('Critical error during symmetric group update')
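The result-draining loop shared by the examples above gives priority to groups referenced by metadata that has already been parsed. A self-contained sketch of that pattern, with illustrative names (drain, referenced) that are not part of the source:

    def drain(results, referenced):
        """Pop pending results, preferring ids referenced by earlier processing."""
        queue = set()
        while results:
            if queue:
                gid = queue.pop()
                if gid not in results:
                    continue
                payload = results.pop(gid)
            else:
                gid, payload = results.popitem()
            # In the examples, parsing a group's metakey schedules its sibling groups;
            # here a precomputed "referenced" mapping plays that role.
            queue.update(referenced.get(gid, ()))
            yield gid, payload

    order = list(drain({1: 'a', 2: 'b', 3: 'c'}, {3: [1]}))
    # Whichever id is popped first, the ids it references are drained before the
    # remaining results.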